author    Linus Torvalds <torvalds@linux-foundation.org>  2012-03-20 21:04:47 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-03-20 21:04:47 -0700
commit    3b59bf081622b6446db77ad06c93fe23677bc533 (patch)
tree      3f4bb5a27c90cc86994a1f6d3c53fbf9208003cb /drivers/net
parent    Merge branch 'for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq (diff)
parent    Fix pppol2tp getsockname() (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking merge from David Miller:

 "1) Move ixgbe driver over to purely page based buffering on receive.
     From Alexander Duyck.

  2) Add receive packet steering support to e1000e, from Bruce Allan.

  3) Convert TCP MD5 support over to RCU, from Eric Dumazet.

  4) Reduce cpu usage in handling out-of-order TCP packets on modern
     systems, also from Eric Dumazet.

  5) Support the IP{,V6}_UNICAST_IF socket options, making the wine
     folks happy, from Erich Hoover.

  6) Support VLAN trunking from guests in hyperv driver, from
     Haiyang Zhang.

  7) Support byte-queue-limits in r8169, from Igor Maravic.

  8) Outline code intended for IP_RECVTOS in IP_PKTOPTIONS existed but
     was never properly implemented; Jiri Benc fixed that.

  9) 64-bit statistics support in r8169 and 8139too, from Junchang Wang.

 10) Support kernel side dump filtering by ctmark in netfilter
     ctnetlink, from Pablo Neira Ayuso.

 11) Support byte-queue-limits in gianfar driver, from Paul Gortmaker
     (see the BQL sketch after this message).

 12) Add new peek socket options to assist with socket migration, from
     Pavel Emelyanov.

 13) Add sch_plug packet scheduler whose queue is controlled by
     userland daemons using explicit freeze and release commands.
     From Shriram Rajagopalan.

 14) Fix FCOE checksum offload handling on transmit, from Yi Zou."

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1846 commits)
  Fix pppol2tp getsockname()
  Remove printk from rds_sendmsg
  ipv6: fix incorrent ipv6 ipsec packet fragment
  cpsw: Hook up default ndo_change_mtu.
  net: qmi_wwan: fix build error due to cdc-wdm dependecy
  netdev: driver: ethernet: Add TI CPSW driver
  netdev: driver: ethernet: add cpsw address lookup engine support
  phy: add am79c874 PHY support
  mlx4_core: fix race on comm channel
  bonding: send igmp report for its master
  fs_enet: Add MPC5125 FEC support and PHY interface selection
  net: bpf_jit: fix BPF_S_LDX_B_MSH compilation
  net: update the usage of CHECKSUM_UNNECESSARY
  fcoe: use CHECKSUM_UNNECESSARY instead of CHECKSUM_PARTIAL on tx
  net: do not do gso for CHECKSUM_UNNECESSARY in netif_needs_gso
  ixgbe: Fix issues with SR-IOV loopback when flow control is disabled
  net/hyperv: Fix the code handling tx busy
  ixgbe: fix namespace issues when FCoE/DCB is not enabled
  rtlwifi: Remove unused ETH_ADDR_LEN defines
  igbvf: Use ETH_ALEN
  ...

Fix up fairly trivial conflicts in drivers/isdn/gigaset/interface.c and
drivers/net/usb/{Kconfig,qmi_wwan.c} as per David.
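As a quick illustration of the byte-queue-limits (BQL) support mentioned in items 7 and 11 above, the sketch below shows the three accounting hooks a driver wires into its TX path. This is a minimal sketch under assumptions, not the actual r8169 or gianfar code: the my_* function names and the ring-handling details are hypothetical placeholders; only netdev_sent_queue(), netdev_completed_queue() and netdev_reset_queue() are the real BQL interface from <linux/netdevice.h>.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* TX path: tell BQL how many bytes were handed to the hardware. */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... map the skb and fill a TX descriptor (driver specific) ... */
	netdev_sent_queue(dev, skb->len);
	/* ... kick the hardware ... */
	return NETDEV_TX_OK;
}

/* TX completion (NAPI/interrupt): report what the hardware finished. */
static void my_tx_complete(struct net_device *dev,
			   unsigned int pkts, unsigned int bytes)
{
	/* ... unmap and free the completed skbs first ... */
	netdev_completed_queue(dev, pkts, bytes);
}

/* Ring teardown or reset: clear the BQL accounting. */
static void my_tx_reset(struct net_device *dev)
{
	netdev_reset_queue(dev);
}

Keeping the accounting symmetric (every byte reported via netdev_sent_queue() is eventually reported back through netdev_completed_queue() or cleared with netdev_reset_queue()) is what lets the stack cap the bytes in flight without stalling the queue.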
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/bonding/bond_3ad.c2
-rw-r--r--drivers/net/bonding/bond_alb.c12
-rw-r--r--drivers/net/bonding/bond_main.c18
-rw-r--r--drivers/net/caif/caif_hsi.c145
-rw-r--r--drivers/net/can/Kconfig6
-rw-r--r--drivers/net/can/bfin_can.c36
-rw-r--r--drivers/net/can/cc770/cc770.c1
-rw-r--r--drivers/net/can/dev.c33
-rw-r--r--drivers/net/can/flexcan.c61
-rw-r--r--drivers/net/can/mcp251x.c3
-rw-r--r--drivers/net/can/mscan/mscan.c33
-rw-r--r--drivers/net/can/pch_can.c2
-rw-r--r--drivers/net/can/sja1000/Kconfig28
-rw-r--r--drivers/net/can/sja1000/Makefile1
-rw-r--r--drivers/net/can/sja1000/peak_pci.c505
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c753
-rw-r--r--drivers/net/can/sja1000/plx_pci.c22
-rw-r--r--drivers/net/can/sja1000/sja1000.c19
-rw-r--r--drivers/net/can/slcan.c6
-rw-r--r--drivers/net/can/ti_hecc.c32
-rw-r--r--drivers/net/can/usb/Kconfig6
-rw-r--r--drivers/net/can/usb/Makefile1
-rw-r--r--drivers/net/can/usb/ems_usb.c63
-rw-r--r--drivers/net/can/usb/esd_usb2.c27
-rw-r--r--drivers/net/can/usb/peak_usb/Makefile2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c899
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c951
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h146
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c1036
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.h178
-rw-r--r--drivers/net/dummy.c3
-rw-r--r--drivers/net/ethernet/3com/3c501.c2
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c515.c10
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c10
-rw-r--r--drivers/net/ethernet/3com/Kconfig2
-rw-r--r--drivers/net/ethernet/3com/typhoon.c19
-rw-r--r--drivers/net/ethernet/8390/ax88796.c2
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c4
-rw-r--r--drivers/net/ethernet/8390/lib8390.c2
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c2
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c13
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c27
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.h2
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c5
-rw-r--r--drivers/net/ethernet/alteon/acenic.c5
-rw-r--r--drivers/net/ethernet/amd/7990.c2
-rw-r--r--drivers/net/ethernet/amd/Kconfig2
-rw-r--r--drivers/net/ethernet/amd/a2065.c2
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c4
-rw-r--r--drivers/net/ethernet/amd/am79c961a.h2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c7
-rw-r--r--drivers/net/ethernet/amd/ariadne.c2
-rw-r--r--drivers/net/ethernet/amd/atarilance.c2
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c12
-rw-r--r--drivers/net/ethernet/amd/declance.c4
-rw-r--r--drivers/net/ethernet/amd/depca.c2
-rw-r--r--drivers/net/ethernet/amd/hplance.c10
-rw-r--r--drivers/net/ethernet/amd/ni65.c6
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c16
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c2
-rw-r--r--drivers/net/ethernet/amd/sunlance.c4
-rw-r--r--drivers/net/ethernet/apple/bmac.c19
-rw-r--r--drivers/net/ethernet/apple/mace.c9
-rw-r--r--drivers/net/ethernet/apple/macmace.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c12
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c1
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c12
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c13
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.h5
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c1
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.h1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c108
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h76
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c486
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h184
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c319
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c310
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h62
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h55
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c324
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c974
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c180
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h26
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c432
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h147
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c42
-rw-r--r--drivers/net/ethernet/broadcom/cnic_defs.h27
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h4
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c1583
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h14
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c8
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h1
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c1
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c21
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c42
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.c8
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/version.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c4
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c4
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c4
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c12
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c52
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c76
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.h3
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_devcmd.h11
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c4
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c8
-rw-r--r--drivers/net/ethernet/dec/ewrk3.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/21142.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c10
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c20
-rw-r--r--drivers/net/ethernet/dec/tulip/eeprom.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/interrupt.c10
-rw-r--r--drivers/net/ethernet/dec/tulip/media.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic2.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/timer.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip.h2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c9
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c21
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c7
-rw-r--r--drivers/net/ethernet/dlink/de600.c2
-rw-r--r--drivers/net/ethernet/dlink/de620.c2
-rw-r--r--drivers/net/ethernet/dlink/sundance.c11
-rw-r--r--drivers/net/ethernet/dnet.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h111
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c245
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h137
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c187
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c1039
-rw-r--r--drivers/net/ethernet/ethoc.c23
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c2
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c2
-rw-r--r--drivers/net/ethernet/fealnx.c8
-rw-r--r--drivers/net/ethernet/freescale/fec.c21
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c4
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.h2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fec.h6
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c32
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c9
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c24
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h5
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_sysfs.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c460
-rw-r--r--drivers/net/ethernet/fujitsu/at1700.c2
-rw-r--r--drivers/net/ethernet/fujitsu/eth16i.c2
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c2
-rw-r--r--drivers/net/ethernet/hp/hp100.c6
-rw-r--r--drivers/net/ethernet/i825xx/3c505.c2
-rw-r--r--drivers/net/ethernet/i825xx/3c507.c2
-rw-r--r--drivers/net/ethernet/i825xx/3c523.c2
-rw-r--r--drivers/net/ethernet/i825xx/3c527.c4
-rw-r--r--drivers/net/ethernet/i825xx/82596.c8
-rw-r--r--drivers/net/ethernet/i825xx/eepro.c2
-rw-r--r--drivers/net/ethernet/i825xx/eexpress.c2
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c2
-rw-r--r--drivers/net/ethernet/i825xx/lp486e.c4
-rw-r--r--drivers/net/ethernet/i825xx/ni52.c2
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c27
-rw-r--r--drivers/net/ethernet/i825xx/znet.c2
-rw-r--r--drivers/net/ethernet/ibm/Kconfig4
-rw-r--r--drivers/net/ethernet/ibm/Makefile1
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea.h2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_ethtool.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_hw.h2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c3
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_phyp.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_phyp.h2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c37
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h15
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/emac.h4
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/phy.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/phy.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c7
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c7
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c7
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.h2
-rw-r--r--drivers/net/ethernet/ibm/iseries_veth.c1710
-rw-r--r--drivers/net/ethernet/icplus/ipg.c4
-rw-r--r--drivers/net/ethernet/intel/e100.c84
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c156
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.h10
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c249
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c99
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c147
-rw-r--r--drivers/net/ethernet/intel/e1000e/Makefile5
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h11
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h52
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c207
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h13
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c433
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c (renamed from drivers/net/ethernet/intel/e1000e/lib.c)1132
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c367
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c722
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c643
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c55
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c327
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c7
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c44
-rw-r--r--drivers/net/ethernet/intel/igbvf/defines.h4
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c19
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h27
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c143
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c7
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h12
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.c12
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h225
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c32
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c69
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c289
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c89
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c929
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c2991
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c14
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c42
-rw-r--r--drivers/net/ethernet/jme.c1
-rw-r--r--drivers/net/ethernet/korina.c6
-rw-r--r--drivers/net/ethernet/lantiq_etop.c15
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c18
-rw-r--r--drivers/net/ethernet/marvell/skge.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c12
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c8
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c3
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c11
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c11
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c8
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c9
-rw-r--r--drivers/net/ethernet/mipsnet.c4
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/ethernet/natsemi/ibmlana.c2
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c2
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c5
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c4
-rw-r--r--drivers/net/ethernet/neterion/s2io.c9
-rw-r--r--drivers/net/ethernet/netx-eth.c5
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c11
-rw-r--r--drivers/net/ethernet/nxp/Kconfig8
-rw-r--r--drivers/net/ethernet/nxp/Makefile1
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c1604
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Kconfig13
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h13
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c230
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c8
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c13
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c6
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h441
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c296
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c109
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c637
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c17
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c138
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c14
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c19
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c86
-rw-r--r--drivers/net/ethernet/racal/ni5010.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c3
-rw-r--r--drivers/net/ethernet/realtek/8139too.c109
-rw-r--r--drivers/net/ethernet/realtek/Kconfig10
-rw-r--r--drivers/net/ethernet/realtek/atp.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1594
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c439
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h15
-rw-r--r--drivers/net/ethernet/s6gmac.c15
-rw-r--r--drivers/net/ethernet/seeq/ether3.c2
-rw-r--r--drivers/net/ethernet/seeq/seeq8005.c2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c1
-rw-r--r--drivers/net/ethernet/sfc/Kconfig21
-rw-r--r--drivers/net/ethernet/sfc/Makefile3
-rw-r--r--drivers/net/ethernet/sfc/bitfield.h22
-rw-r--r--drivers/net/ethernet/sfc/efx.c931
-rw-r--r--drivers/net/ethernet/sfc/efx.h11
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c200
-rw-r--r--drivers/net/ethernet/sfc/falcon.c54
-rw-r--r--drivers/net/ethernet/sfc/falcon_boards.c25
-rw-r--r--drivers/net/ethernet/sfc/falcon_xmac.c15
-rw-r--r--drivers/net/ethernet/sfc/filter.c255
-rw-r--r--drivers/net/ethernet/sfc/filter.h20
-rw-r--r--drivers/net/ethernet/sfc/mac.h21
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c149
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h36
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mac.c65
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c415
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h3542
-rw-r--r--drivers/net/ethernet/sfc/mcdi_phy.c36
-rw-r--r--drivers/net/ethernet/sfc/mdio_10g.c2
-rw-r--r--drivers/net/ethernet/sfc/mtd.c15
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h325
-rw-r--r--drivers/net/ethernet/sfc/nic.c602
-rw-r--r--drivers/net/ethernet/sfc/nic.h141
-rw-r--r--drivers/net/ethernet/sfc/qt202x_phy.c6
-rw-r--r--drivers/net/ethernet/sfc/regs.h20
-rw-r--r--drivers/net/ethernet/sfc/rx.c133
-rw-r--r--drivers/net/ethernet/sfc/selftest.c218
-rw-r--r--drivers/net/ethernet/sfc/selftest.h4
-rw-r--r--drivers/net/ethernet/sfc/siena.c50
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c1643
-rw-r--r--drivers/net/ethernet/sfc/spi.h2
-rw-r--r--drivers/net/ethernet/sfc/tenxpress.c2
-rw-r--r--drivers/net/ethernet/sfc/tx.c8
-rw-r--r--drivers/net/ethernet/sfc/txc43128_phy.c2
-rw-r--r--drivers/net/ethernet/sfc/vfdi.h255
-rw-r--r--drivers/net/ethernet/sis/sis190.c2
-rw-r--r--drivers/net/ethernet/sis/sis900.c9
-rw-r--r--drivers/net/ethernet/smsc/epic100.c11
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c3
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c3
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c74
-rw-r--r--drivers/net/ethernet/sun/cassini.c4
-rw-r--r--drivers/net/ethernet/sun/niu.c6
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c1
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/sun/sunqe.c12
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c13
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c8
-rw-r--r--drivers/net/ethernet/ti/Kconfig11
-rw-r--r--drivers/net/ethernet/ti/Makefile2
-rw-r--r--drivers/net/ethernet/ti/cpmac.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw.c1019
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c641
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h93
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c11
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c2
-rw-r--r--drivers/net/ethernet/ti/tlan.c1
-rw-r--r--drivers/net/ethernet/tile/tilepro.c9
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c5
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c9
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c6
-rw-r--r--drivers/net/ethernet/via/via-rhine.c5
-rw-r--r--drivers/net/ethernet/via/via-velocity.c6
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig8
-rw-r--r--drivers/net/ethernet/xilinx/Makefile2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c9
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h508
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c1669
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c238
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c6
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c3
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixpdev.c7
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hamradio/baycom_par.c2
-rw-r--r--drivers/net/hamradio/yam.c1
-rw-r--r--drivers/net/hippi/rrunner.c8
-rw-r--r--drivers/net/hyperv/hyperv_net.h34
-rw-r--r--drivers/net/hyperv/netvsc.c3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c20
-rw-r--r--drivers/net/hyperv/rndis_filter.c159
-rw-r--r--drivers/net/ifb.c2
-rw-r--r--drivers/net/irda/ali-ircc.c2
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/pxaficp_ir.c6
-rw-r--r--drivers/net/irda/via-ircc.c4
-rw-r--r--drivers/net/irda/w83977af_ir.c2
-rw-r--r--drivers/net/macvlan.c3
-rw-r--r--drivers/net/mdio.c3
-rw-r--r--drivers/net/mii.c4
-rw-r--r--drivers/net/netconsole.c8
-rw-r--r--drivers/net/phy/Kconfig5
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/amd.c102
-rw-r--r--drivers/net/phy/broadcom.c6
-rw-r--r--drivers/net/phy/dp83640.c2
-rw-r--r--drivers/net/plip/plip.c4
-rw-r--r--drivers/net/ppp/ppp_async.c2
-rw-r--r--drivers/net/ppp/ppp_deflate.c30
-rw-r--r--drivers/net/ppp/ppp_generic.c6
-rw-r--r--drivers/net/ppp/ppp_synctty.c2
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/ppp/pppox.c2
-rw-r--r--drivers/net/ppp/pptp.c8
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/slip/slip.c4
-rw-r--r--drivers/net/team/team.c3
-rw-r--r--drivers/net/tokenring/3c359.c4
-rw-r--r--drivers/net/tokenring/madgemc.c1
-rw-r--r--drivers/net/tokenring/tms380tr.c179
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/asix.c88
-rw-r--r--drivers/net/usb/cdc_ncm.c236
-rw-r--r--drivers/net/usb/kaweth.c8
-rw-r--r--drivers/net/usb/mcs7830.c2
-rw-r--r--drivers/net/usb/pegasus.c4
-rw-r--r--drivers/net/usb/rtl8150.c4
-rw-r--r--drivers/net/usb/smsc75xx.c2
-rw-r--r--drivers/net/usb/smsc95xx.c2
-rw-r--r--drivers/net/usb/usbnet.c6
-rw-r--r--drivers/net/veth.c4
-rw-r--r--drivers/net/virtio_net.c9
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c63
-rw-r--r--drivers/net/wan/c101.c4
-rw-r--r--drivers/net/wan/dscc4.c8
-rw-r--r--drivers/net/wan/hdlc_fr.c2
-rw-r--r--drivers/net/wan/hdlc_raw_eth.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c1
-rw-r--r--drivers/net/wan/n2.c4
-rw-r--r--drivers/net/wan/pc300too.c1
-rw-r--r--drivers/net/wan/pci200syn.c1
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wan/x25_asy.c4
-rw-r--r--drivers/net/wireless/airo.c2
-rw-r--r--drivers/net/wireless/ath/ath.h26
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c20
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c8
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h5
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c25
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c12
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c34
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig25
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile33
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.h24
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c1002
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h13
-rw-r--r--drivers/net/wireless/ath/ath6kl/common.h4
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c299
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h169
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c431
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h38
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif-ops.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c27
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h4
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.c213
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h16
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c636
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c234
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c183
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/testmode.c103
-rw-r--r--drivers/net/wireless/ath/ath6kl/testmode.h7
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c592
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c432
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c451
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h90
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig24
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile10
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9001_initvals.h270
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c149
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h104
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h302
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c42
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c113
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c74
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c1126
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h233
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c34
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h23
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_1p0_initvals.h1833
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h465
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h59
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c77
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c400
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h27
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c124
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c156
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h21
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c73
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c34
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c323
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h224
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c87
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c42
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c96
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c290
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c70
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c59
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c33
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c35
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c18
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.c36
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c4
-rw-r--r--drivers/net/wireless/ath/main.c9
-rw-r--r--drivers/net/wireless/atmel.c5
-rw-r--r--drivers/net/wireless/b43/b43.h15
-rw-r--r--drivers/net/wireless/b43/main.c128
-rw-r--r--drivers/net/wireless/b43/phy_n.c532
-rw-r--r--drivers/net/wireless/b43/phy_n.h1
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c76
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h14
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h3
-rw-r--r--drivers/net/wireless/b43legacy/main.c33
-rw-r--r--drivers/net/wireless/b43legacy/phy.c2
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig17
-rw-r--r--drivers/net/wireless/brcm80211/Makefile2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c19
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h17
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c17
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h20
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c71
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c264
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c1621
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.h61
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb_rdl.h75
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c29
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h36
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c22
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c55
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c199
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.h6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c35
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.c14
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c26
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_utils.h15
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c18
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c108
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.h14
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c5
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945-debug.c6
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c286
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c7
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c228
-rw-r--r--drivers/net/wireless/iwlegacy/3945.h14
-rw-r--r--drivers/net/wireless/iwlegacy/4965-calib.c36
-rw-r--r--drivers/net/wireless/iwlegacy/4965-debug.c6
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c1058
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c48
-rw-r--r--drivers/net/wireless/iwlegacy/4965.c698
-rw-r--r--drivers/net/wireless/iwlegacy/4965.h43
-rw-r--r--drivers/net/wireless/iwlegacy/Kconfig78
-rw-r--r--drivers/net/wireless/iwlegacy/common.c1112
-rw-r--r--drivers/net/wireless/iwlegacy/common.h471
-rw-r--r--drivers/net/wireless/iwlegacy/debug.c291
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig11
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c49
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c64
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c89
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c137
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c67
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c110
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rx.c213
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c115
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c396
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tt.c67
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tt.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c174
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1241
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h190
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-bus.h209
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-cfg.h84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h110
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c262
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h94
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans.c)68
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h78
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c185
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h185
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h208
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c993
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h123
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c240
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h165
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h177
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c224
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h62
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-mac80211.c505
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c157
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.h (renamed from drivers/net/wireless/iwlwifi/iwl-wifi.h)81
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h216
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.c264
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c251
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-shared.h400
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.c552
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h99
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h132
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c439
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c238
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c918
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h463
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-ucode.c428
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.h2
-rw-r--r--drivers/net/wireless/libertas/cfg.c37
-rw-r--r--drivers/net/wireless/libertas/if_cs.c5
-rw-r--r--drivers/net/wireless/libertas/if_usb.c4
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c56
-rw-r--r--drivers/net/wireless/mwifiex/11n.c82
-rw-r--r--drivers/net/wireless/mwifiex/11n.h16
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c18
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c211
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.h5
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c176
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c90
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c177
-rw-r--r--drivers/net/wireless/mwifiex/decl.h11
-rw-r--r--drivers/net/wireless/mwifiex/fw.h19
-rw-r--r--drivers/net/wireless/mwifiex/init.c39
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h4
-rw-r--r--drivers/net/wireless/mwifiex/join.c184
-rw-r--r--drivers/net/wireless/mwifiex/main.c70
-rw-r--r--drivers/net/wireless/mwifiex/main.h44
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c394
-rw-r--r--drivers/net/wireless/mwifiex/scan.c430
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c196
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c235
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c356
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c43
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c147
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c10
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c25
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c17
-rw-r--r--drivers/net/wireless/mwifiex/util.c21
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c167
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h4
-rw-r--r--drivers/net/wireless/mwl8k.c45
-rw-r--r--drivers/net/wireless/orinoco/main.c6
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c5
-rw-r--r--drivers/net/wireless/p54/main.c8
-rw-r--r--drivers/net/wireless/p54/p54pci.c48
-rw-r--r--drivers/net/wireless/p54/p54spi.c14
-rw-r--r--drivers/net/wireless/p54/txrx.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c6
-rw-r--r--drivers/net/wireless/rndis_wlan.c61
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h81
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c431
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c66
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c232
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h38
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c24
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h13
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c39
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c21
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c110
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8187.h9
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig5
-rw-r--r--drivers/net/wireless/rtlwifi/base.c73
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c85
-rw-r--r--drivers/net/wireless/rtlwifi/cam.h2
-rw-r--r--drivers/net/wireless/rtlwifi/core.c170
-rw-r--r--drivers/net/wireless/rtlwifi/core.h6
-rw-r--r--drivers/net/wireless/rtlwifi/debug.c4
-rw-r--r--drivers/net/wireless/rtlwifi/debug.h121
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c65
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.h2
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c280
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h3
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c39
-rw-r--r--drivers/net/wireless/rtlwifi/ps.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rc.h2
-rw-r--r--drivers/net/wireless/rtlwifi/regd.c14
-rw-r--r--drivers/net/wireless/rtlwifi/regd.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c212
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c99
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/main.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c281
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/def.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.c26
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c288
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/led.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/led.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c113
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/reg.h3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.c87
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c37
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/table.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/table.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c25
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/def.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.c26
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c464
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/led.c17
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/led.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c133
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c126
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/reg.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.c75
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c34
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/table.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/table.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c44
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/def.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c243
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.c137
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c202
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/led.c16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/led.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c810
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/reg.h3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/rf.c99
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/rf.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c57
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/table.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/table.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c28
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/def.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.c32
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/fw.c76
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/fw.h5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c272
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/led.c22
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/led.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c261
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/reg.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/rf.c66
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/rf.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c88
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/table.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/table.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.h2
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c110
-rw-r--r--drivers/net/wireless/rtlwifi/usb.h3
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h8
-rw-r--r--drivers/net/wireless/wl1251/Makefile2
-rw-r--r--drivers/net/wireless/wl1251/boot.c2
-rw-r--r--drivers/net/wireless/wl1251/io.h9
-rw-r--r--drivers/net/wireless/wl1251/main.c7
-rw-r--r--drivers/net/wireless/wl1251/wl1251.h2
-rw-r--r--drivers/net/wireless/wl12xx/Makefile2
-rw-r--r--drivers/net/wireless/wl12xx/acx.c17
-rw-r--r--drivers/net/wireless/wl12xx/acx.h149
-rw-r--r--drivers/net/wireless/wl12xx/boot.c105
-rw-r--r--drivers/net/wireless/wl12xx/boot.h10
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c169
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h148
-rw-r--r--drivers/net/wireless/wl12xx/conf.h54
-rw-r--r--drivers/net/wireless/wl12xx/debug.h1
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c241
-rw-r--r--drivers/net/wireless/wl12xx/event.c154
-rw-r--r--drivers/net/wireless/wl12xx/event.h20
-rw-r--r--drivers/net/wireless/wl12xx/init.c55
-rw-r--r--drivers/net/wireless/wl12xx/io.c59
-rw-r--r--drivers/net/wireless/wl12xx/io.h2
-rw-r--r--drivers/net/wireless/wl12xx/main.c1115
-rw-r--r--drivers/net/wireless/wl12xx/ps.c38
-rw-r--r--drivers/net/wireless/wl12xx/ps.h2
-rw-r--r--drivers/net/wireless/wl12xx/reg.h27
-rw-r--r--drivers/net/wireless/wl12xx/rx.c2
-rw-r--r--drivers/net/wireless/wl12xx/scan.c62
-rw-r--r--drivers/net/wireless/wl12xx/scan.h2
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c29
-rw-r--r--drivers/net/wireless/wl12xx/spi.c8
-rw-r--r--drivers/net/wireless/wl12xx/testmode.c50
-rw-r--r--drivers/net/wireless/wl12xx/tx.c116
-rw-r--r--drivers/net/wireless/wl12xx/tx.h6
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h53
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c12
-rw-r--r--drivers/net/xen-netback/netback.c4
-rw-r--r--drivers/net/xen-netfront.c62
888 files changed, 54809 insertions, 36413 deletions
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 0ae0d7c54ccf..793b00138275 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -660,7 +660,7 @@ static void __attach_bond_to_agg(struct port *port)
static void __detach_bond_from_agg(struct port *port)
{
port = NULL; /* just to satisfy the compiler */
- // This function does nothing sience the parser/multiplexer of the receive
+ // This function does nothing since the parser/multiplexer of the receive
// and the parser/multiplexer of the aggregator are already combined
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f820b26b9db3..9abfde479316 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -180,11 +180,9 @@ static int tlb_initialize(struct bonding *bond)
int i;
new_hashtbl = kzalloc(size, GFP_KERNEL);
- if (!new_hashtbl) {
- pr_err("%s: Error: Failed to allocate TLB hash table\n",
- bond->dev->name);
+ if (!new_hashtbl)
return -1;
- }
+
_lock_tx_hashtbl_bh(bond);
bond_info->tx_hashtbl = new_hashtbl;
@@ -784,11 +782,9 @@ static int rlb_initialize(struct bonding *bond)
int i;
new_hashtbl = kmalloc(size, GFP_KERNEL);
- if (!new_hashtbl) {
- pr_err("%s: Error: Failed to allocate RLB hash table\n",
- bond->dev->name);
+ if (!new_hashtbl)
return -1;
- }
+
_lock_rx_hashtbl_bh(bond);
bond_info->rx_hashtbl = new_hashtbl;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 435984ad8b2f..0730203a19f2 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -766,18 +766,30 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev)
*/
static void bond_resend_igmp_join_requests(struct bonding *bond)
{
- struct net_device *vlan_dev;
+ struct net_device *bond_dev, *vlan_dev, *master_dev;
struct vlan_entry *vlan;
read_lock(&bond->lock);
+ bond_dev = bond->dev;
+
/* rejoin all groups on bond device */
- __bond_resend_igmp_join_requests(bond->dev);
+ __bond_resend_igmp_join_requests(bond_dev);
+
+ /*
+ * if bond is enslaved to a bridge,
+ * then rejoin all groups on its master
+ */
+ master_dev = bond_dev->master;
+ if (master_dev)
+ if ((master_dev->priv_flags & IFF_EBRIDGE)
+ && (bond_dev->priv_flags & IFF_BRIDGE_PORT))
+ __bond_resend_igmp_join_requests(master_dev);
/* rejoin all groups on vlan devices */
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
rcu_read_lock();
- vlan_dev = __vlan_find_dev_deep(bond->dev,
+ vlan_dev = __vlan_find_dev_deep(bond_dev,
vlan->vlan_id);
rcu_read_unlock();
if (vlan_dev)
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index c998e1afebc6..9a66e2a910ae 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -426,6 +426,35 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
return xfer_sz;
}
+static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
+{
+ int xfer_sz = 0;
+ int nfrms = 0;
+ u16 *plen;
+
+ if ((desc->header & ~CFHSI_PIGGY_DESC) ||
+ (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
+
+ pr_err("Invalid descriptor. %x %x\n", desc->header,
+ desc->offset);
+ return -EPROTO;
+ }
+
+ /* Calculate transfer length. */
+ plen = desc->cffrm_len;
+ while (nfrms < CFHSI_MAX_PKTS && *plen) {
+ xfer_sz += *plen;
+ plen++;
+ nfrms++;
+ }
+
+ if (xfer_sz % 4) {
+ pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
+ return -EPROTO;
+ }
+ return xfer_sz;
+}
+
static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
int rx_sz = 0;
@@ -517,8 +546,10 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
int res;
- int desc_pld_len = 0;
+ int desc_pld_len = 0, rx_len, rx_state;
struct cfhsi_desc *desc = NULL;
+ u8 *rx_ptr, *rx_buf;
+ struct cfhsi_desc *piggy_desc = NULL;
desc = (struct cfhsi_desc *)cfhsi->rx_buf;
@@ -534,65 +565,71 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
spin_unlock_bh(&cfhsi->lock);
if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
- desc_pld_len = cfhsi_rx_desc(desc, cfhsi);
- if (desc_pld_len == -ENOMEM)
- goto restart;
- if (desc_pld_len == -EPROTO)
+ desc_pld_len = cfhsi_rx_desc_len(desc);
+
+ if (desc_pld_len < 0)
goto out_of_sync;
+
+ rx_buf = cfhsi->rx_buf;
+ rx_len = desc_pld_len;
+ if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
+ rx_len += CFHSI_DESC_SZ;
+ if (desc_pld_len == 0)
+ rx_buf = cfhsi->rx_flip_buf;
} else {
- int pld_len;
+ rx_buf = cfhsi->rx_flip_buf;
- if (!cfhsi->rx_state.piggy_desc) {
- pld_len = cfhsi_rx_pld(desc, cfhsi);
- if (pld_len == -ENOMEM)
- goto restart;
- if (pld_len == -EPROTO)
- goto out_of_sync;
- cfhsi->rx_state.pld_len = pld_len;
- } else {
- pld_len = cfhsi->rx_state.pld_len;
- }
+ rx_len = CFHSI_DESC_SZ;
+ if (cfhsi->rx_state.pld_len > 0 &&
+ (desc->header & CFHSI_PIGGY_DESC)) {
- if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
- struct cfhsi_desc *piggy_desc;
piggy_desc = (struct cfhsi_desc *)
(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
- pld_len);
+ cfhsi->rx_state.pld_len);
+
cfhsi->rx_state.piggy_desc = true;
- /* Extract piggy-backed descriptor. */
- desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi);
- if (desc_pld_len == -ENOMEM)
- goto restart;
+ /* Extract payload len from piggy-backed descriptor. */
+ desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
+ if (desc_pld_len < 0)
+ goto out_of_sync;
+
+ if (desc_pld_len > 0)
+ rx_len = desc_pld_len;
+
+ if (desc_pld_len > 0 &&
+ (piggy_desc->header & CFHSI_PIGGY_DESC))
+ rx_len += CFHSI_DESC_SZ;
/*
* Copy needed information from the piggy-backed
* descriptor to the descriptor in the start.
*/
- memcpy((u8 *)desc, (u8 *)piggy_desc,
+ memcpy(rx_buf, (u8 *)piggy_desc,
CFHSI_DESC_SHORT_SZ);
-
+ /* Mark no embedded frame here */
+ piggy_desc->offset = 0;
if (desc_pld_len == -EPROTO)
goto out_of_sync;
}
}
- memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
if (desc_pld_len) {
- cfhsi->rx_state.state = CFHSI_RX_STATE_PAYLOAD;
- cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ;
- cfhsi->rx_len = desc_pld_len;
+ rx_state = CFHSI_RX_STATE_PAYLOAD;
+ rx_ptr = rx_buf + CFHSI_DESC_SZ;
} else {
- cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
- cfhsi->rx_ptr = cfhsi->rx_buf;
- cfhsi->rx_len = CFHSI_DESC_SZ;
+ rx_state = CFHSI_RX_STATE_DESC;
+ rx_ptr = rx_buf;
+ rx_len = CFHSI_DESC_SZ;
}
+ /* Initiate next read */
if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
/* Set up new transfer. */
dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
- __func__);
- res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len,
+ __func__);
+
+ res = cfhsi->dev->cfhsi_rx(rx_ptr, rx_len,
cfhsi->dev);
if (WARN_ON(res < 0)) {
dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
@@ -601,16 +638,32 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
cfhsi->ndev->stats.rx_dropped++;
}
}
- return;
-restart:
- if (++cfhsi->rx_state.retries > CFHSI_MAX_RX_RETRIES) {
- dev_err(&cfhsi->ndev->dev, "%s: No memory available "
- "in %d iterations.\n",
- __func__, CFHSI_MAX_RX_RETRIES);
- BUG();
+ if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
+ /* Extract payload from descriptor */
+ if (cfhsi_rx_desc(desc, cfhsi) < 0)
+ goto out_of_sync;
+ } else {
+ /* Extract payload */
+ if (cfhsi_rx_pld(desc, cfhsi) < 0)
+ goto out_of_sync;
+ if (piggy_desc) {
+ /* Extract any payload in piggyback descriptor. */
+ if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
+ goto out_of_sync;
+ }
}
- mod_timer(&cfhsi->rx_slowpath_timer, jiffies + 1);
+
+ /* Update state info */
+ memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
+ cfhsi->rx_state.state = rx_state;
+ cfhsi->rx_ptr = rx_ptr;
+ cfhsi->rx_len = rx_len;
+ cfhsi->rx_state.pld_len = desc_pld_len;
+ cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;
+
+ if (rx_buf != cfhsi->rx_buf)
+ swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
return;
out_of_sync:
@@ -1040,6 +1093,12 @@ int cfhsi_probe(struct platform_device *pdev)
goto err_alloc_rx;
}
+ cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
+ if (!cfhsi->rx_flip_buf) {
+ res = -ENODEV;
+ goto err_alloc_rx_flip;
+ }
+
/* Pre-calculate inactivity timeout. */
if (inactivity_timeout != -1) {
cfhsi->inactivity_timeout =
@@ -1138,6 +1197,8 @@ int cfhsi_probe(struct platform_device *pdev)
err_activate:
destroy_workqueue(cfhsi->wq);
err_create_wq:
+ kfree(cfhsi->rx_flip_buf);
+ err_alloc_rx_flip:
kfree(cfhsi->rx_buf);
err_alloc_rx:
kfree(cfhsi->tx_buf);
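For orientation, the caif_hsi hunks above switch RX to a flip buffer and size the next HSI read from the piggy-backed descriptor: one descriptor by default, the announced payload length when it is positive, plus one more descriptor's worth when that piggy-backed descriptor itself carries CFHSI_PIGGY_DESC. A minimal standalone sketch of just this length computation, independent of the patch and with DESC_SZ as a placeholder for CFHSI_DESC_SZ:

#include <stdbool.h>

#define DESC_SZ 64	/* placeholder for CFHSI_DESC_SZ */

/* desc_pld_len: payload length read from the piggy-backed descriptor
 * (negative means out of sync); piggy_has_piggy: that descriptor itself
 * carries the CFHSI_PIGGY_DESC flag. */
static int next_rx_len(int desc_pld_len, bool piggy_has_piggy)
{
	int rx_len = DESC_SZ;		/* default: read one descriptor */

	if (desc_pld_len < 0)
		return -1;		/* caller goes out_of_sync */
	if (desc_pld_len > 0)
		rx_len = desc_pld_len;	/* read the announced payload */
	if (desc_pld_len > 0 && piggy_has_piggy)
		rx_len += DESC_SZ;	/* plus the following descriptor */
	return rx_len;
}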
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index ab45758c49a4..bb709fd66993 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -103,11 +103,11 @@ config CAN_FLEXCAN
Say Y here if you want support for the Freescale FlexCAN.
config PCH_CAN
- tristate "PCH CAN"
+ tristate "Intel EG20T PCH CAN controller"
depends on CAN_DEV && PCI
---help---
- This driver is for PCH CAN of Topcliff which is an IOH for x86
- embedded processor.
+ This driver is for the PCH CAN of Topcliff (Intel EG20T PCH), which
+ is an IOH for x86 embedded processors (Intel Atom E6xx series).
This driver can access the CAN bus.
source "drivers/net/can/mscan/Kconfig"
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 349e0fabb63a..3f88473423e9 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -82,8 +82,7 @@ static int bfin_can_set_bittiming(struct net_device *dev)
bfin_write(&reg->clock, clk);
bfin_write(&reg->timing, timing);
- dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n",
- clk, timing);
+ netdev_info(dev, "setting CLOCK=0x%04x TIMING=0x%04x\n", clk, timing);
return 0;
}
@@ -108,8 +107,7 @@ static void bfin_can_set_reset_mode(struct net_device *dev)
while (!(bfin_read(&reg->control) & CCA)) {
udelay(10);
if (--timeout == 0) {
- dev_err(dev->dev.parent,
- "fail to enter configuration mode\n");
+ netdev_err(dev, "fail to enter configuration mode\n");
BUG();
}
}
@@ -165,8 +163,7 @@ static void bfin_can_set_normal_mode(struct net_device *dev)
while (bfin_read(&reg->status) & CCA) {
udelay(10);
if (--timeout == 0) {
- dev_err(dev->dev.parent,
- "fail to leave configuration mode\n");
+ netdev_err(dev, "fail to leave configuration mode\n");
BUG();
}
}
@@ -224,6 +221,20 @@ static int bfin_can_set_mode(struct net_device *dev, enum can_mode mode)
return 0;
}
+static int bfin_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct bfin_can_priv *priv = netdev_priv(dev);
+ struct bfin_can_regs __iomem *reg = priv->membase;
+
+ u16 cec = bfin_read(&reg->cec);
+
+ bec->txerr = cec >> 8;
+ bec->rxerr = cec;
+
+ return 0;
+}
+
static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bfin_can_priv *priv = netdev_priv(dev);
@@ -331,7 +342,7 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
if (isrc & RMLIS) {
/* data overrun interrupt */
- dev_dbg(dev->dev.parent, "data overrun interrupt\n");
+ netdev_dbg(dev, "data overrun interrupt\n");
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
stats->rx_over_errors++;
@@ -339,7 +350,7 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
}
if (isrc & BOIS) {
- dev_dbg(dev->dev.parent, "bus-off mode interrupt\n");
+ netdev_dbg(dev, "bus-off mode interrupt\n");
state = CAN_STATE_BUS_OFF;
cf->can_id |= CAN_ERR_BUSOFF;
can_bus_off(dev);
@@ -347,13 +358,12 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
if (isrc & EPIS) {
/* error passive interrupt */
- dev_dbg(dev->dev.parent, "error passive interrupt\n");
+ netdev_dbg(dev, "error passive interrupt\n");
state = CAN_STATE_ERROR_PASSIVE;
}
if ((isrc & EWTIS) || (isrc & EWRIS)) {
- dev_dbg(dev->dev.parent,
- "Error Warning Transmit/Receive Interrupt\n");
+ netdev_dbg(dev, "Error Warning Transmit/Receive Interrupt\n");
state = CAN_STATE_ERROR_WARNING;
}
@@ -509,6 +519,7 @@ struct net_device *alloc_bfin_candev(void)
priv->can.bittiming_const = &bfin_can_bittiming_const;
priv->can.do_set_bittiming = bfin_can_set_bittiming;
priv->can.do_set_mode = bfin_can_set_mode;
+ priv->can.do_get_berr_counter = bfin_can_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
return dev;
@@ -636,8 +647,7 @@ static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
while (!(bfin_read(&reg->intr) & SMACK)) {
udelay(10);
if (--timeout == 0) {
- dev_err(dev->dev.parent,
- "fail to enter sleep mode\n");
+ netdev_err(dev, "fail to enter sleep mode\n");
BUG();
}
}
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index c30f0e6f1048..d42a6a7396f2 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -34,7 +34,6 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/dev.h>
#include <linux/can/platform/cc770.h>
#include "cc770.h"
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 120f1ab5a2ce..c5fe3a3db8c9 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -130,13 +130,13 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
/* Error in one-tenth of a percent */
error = (best_error * 1000) / bt->bitrate;
if (error > CAN_CALC_MAX_ERROR) {
- dev_err(dev->dev.parent,
- "bitrate error %ld.%ld%% too high\n",
- error / 10, error % 10);
+ netdev_err(dev,
+ "bitrate error %ld.%ld%% too high\n",
+ error / 10, error % 10);
return -EDOM;
} else {
- dev_warn(dev->dev.parent, "bitrate error %ld.%ld%%\n",
- error / 10, error % 10);
+ netdev_warn(dev, "bitrate error %ld.%ld%%\n",
+ error / 10, error % 10);
}
}
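The check above reports the bit-timing deviation in tenths of a percent: error = (best_error * 1000) / bitrate, then printed as error/10 and error%10. A tiny standalone illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long bitrate = 500000;		/* requested bit rate, made up */
	unsigned long best_error = 1500;	/* closest achievable deviation, made up */
	unsigned long error = (best_error * 1000) / bitrate;	/* tenths of a percent */

	/* prints "bitrate error 0.3%" */
	printf("bitrate error %lu.%lu%%\n", error / 10, error % 10);
	return 0;
}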
@@ -172,7 +172,7 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
#else /* !CONFIG_CAN_CALC_BITTIMING */
static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
{
- dev_err(dev->dev.parent, "bit-timing calculation not available\n");
+ netdev_err(dev, "bit-timing calculation not available\n");
return -EINVAL;
}
#endif /* CONFIG_CAN_CALC_BITTIMING */
@@ -313,8 +313,7 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
priv->echo_skb[idx] = skb;
} else {
/* locking problem with netif_stop_queue() ?? */
- dev_err(dev->dev.parent, "%s: BUG! echo_skb is occupied!\n",
- __func__);
+ netdev_err(dev, "%s: BUG! echo_skb is occupied!\n", __func__);
kfree_skb(skb);
}
}
@@ -327,16 +326,24 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
* is handled in the device driver. The driver must protect
* access to priv->echo_skb, if necessary.
*/
-void can_get_echo_skb(struct net_device *dev, unsigned int idx)
+unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
{
struct can_priv *priv = netdev_priv(dev);
BUG_ON(idx >= priv->echo_skb_max);
if (priv->echo_skb[idx]) {
+ struct sk_buff *skb = priv->echo_skb[idx];
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u8 dlc = cf->can_dlc;
+
netif_rx(priv->echo_skb[idx]);
priv->echo_skb[idx] = NULL;
+
+ return dlc;
}
+
+ return 0;
}
EXPORT_SYMBOL_GPL(can_get_echo_skb);
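With can_get_echo_skb() now returning the echoed frame's dlc, a driver can defer tx_bytes accounting to TX completion; the flexcan hunks further down in this diff apply exactly that pattern. A rough sketch of the driver-side usage (my_start_xmit, my_tx_done_irq and the commented-out hardware call are hypothetical stand-ins, not part of the patch):

#include <linux/netdevice.h>
#include <linux/can/dev.h>

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* keep the frame for local echo and byte accounting */
	can_put_echo_skb(skb, dev, 0);
	/* hand the frame to the hardware here, e.g. my_hw_send(dev, skb) */
	return NETDEV_TX_OK;
}

static void my_tx_done_irq(struct net_device *dev)
{
	/* echo the frame back and account its length */
	dev->stats.tx_bytes += can_get_echo_skb(dev, 0);
	dev->stats.tx_packets++;
	netif_wake_queue(dev);
}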
@@ -392,7 +399,7 @@ void can_restart(unsigned long data)
stats->rx_bytes += cf->can_dlc;
restart:
- dev_dbg(dev->dev.parent, "restarted\n");
+ netdev_dbg(dev, "restarted\n");
priv->can_stats.restarts++;
/* Now restart the device */
@@ -400,7 +407,7 @@ restart:
netif_carrier_on(dev);
if (err)
- dev_err(dev->dev.parent, "Error %d during restart", err);
+ netdev_err(dev, "Error %d during restart", err);
}
int can_restart_now(struct net_device *dev)
@@ -433,7 +440,7 @@ void can_bus_off(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- dev_dbg(dev->dev.parent, "bus-off\n");
+ netdev_dbg(dev, "bus-off\n");
netif_carrier_off(dev);
priv->can_stats.bus_off++;
@@ -545,7 +552,7 @@ int open_candev(struct net_device *dev)
struct can_priv *priv = netdev_priv(dev);
if (!priv->bittiming.tq && !priv->bittiming.bitrate) {
- dev_err(dev->dev.parent, "bit-timing not yet defined\n");
+ netdev_err(dev, "bit-timing not yet defined\n");
return -EINVAL;
}
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 96d235799ec1..1efb08386c61 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -272,7 +272,6 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
const struct flexcan_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
struct flexcan_regs __iomem *regs = priv->base;
struct can_frame *cf = (struct can_frame *)skb->data;
u32 can_id;
@@ -302,14 +301,11 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
flexcan_write(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[1]);
}
+ can_put_echo_skb(skb, dev, 0);
+
flexcan_write(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id);
flexcan_write(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
- kfree_skb(skb);
-
- /* tx_packets is incremented in flexcan_irq */
- stats->tx_bytes += cf->can_dlc;
-
return NETDEV_TX_OK;
}
@@ -322,34 +318,34 @@ static void do_bus_err(struct net_device *dev,
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
if (reg_esr & FLEXCAN_ESR_BIT1_ERR) {
- dev_dbg(dev->dev.parent, "BIT1_ERR irq\n");
+ netdev_dbg(dev, "BIT1_ERR irq\n");
cf->data[2] |= CAN_ERR_PROT_BIT1;
tx_errors = 1;
}
if (reg_esr & FLEXCAN_ESR_BIT0_ERR) {
- dev_dbg(dev->dev.parent, "BIT0_ERR irq\n");
+ netdev_dbg(dev, "BIT0_ERR irq\n");
cf->data[2] |= CAN_ERR_PROT_BIT0;
tx_errors = 1;
}
if (reg_esr & FLEXCAN_ESR_ACK_ERR) {
- dev_dbg(dev->dev.parent, "ACK_ERR irq\n");
+ netdev_dbg(dev, "ACK_ERR irq\n");
cf->can_id |= CAN_ERR_ACK;
cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
tx_errors = 1;
}
if (reg_esr & FLEXCAN_ESR_CRC_ERR) {
- dev_dbg(dev->dev.parent, "CRC_ERR irq\n");
+ netdev_dbg(dev, "CRC_ERR irq\n");
cf->data[2] |= CAN_ERR_PROT_BIT;
cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
rx_errors = 1;
}
if (reg_esr & FLEXCAN_ESR_FRM_ERR) {
- dev_dbg(dev->dev.parent, "FRM_ERR irq\n");
+ netdev_dbg(dev, "FRM_ERR irq\n");
cf->data[2] |= CAN_ERR_PROT_FORM;
rx_errors = 1;
}
if (reg_esr & FLEXCAN_ESR_STF_ERR) {
- dev_dbg(dev->dev.parent, "STF_ERR irq\n");
+ netdev_dbg(dev, "STF_ERR irq\n");
cf->data[2] |= CAN_ERR_PROT_STUFF;
rx_errors = 1;
}
@@ -396,7 +392,7 @@ static void do_state(struct net_device *dev,
*/
if (new_state >= CAN_STATE_ERROR_WARNING &&
new_state <= CAN_STATE_BUS_OFF) {
- dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
+ netdev_dbg(dev, "Error Warning IRQ\n");
priv->can.can_stats.error_warning++;
cf->can_id |= CAN_ERR_CRTL;
@@ -412,7 +408,7 @@ static void do_state(struct net_device *dev,
*/
if (new_state >= CAN_STATE_ERROR_PASSIVE &&
new_state <= CAN_STATE_BUS_OFF) {
- dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
+ netdev_dbg(dev, "Error Passive IRQ\n");
priv->can.can_stats.error_passive++;
cf->can_id |= CAN_ERR_CRTL;
@@ -422,8 +418,8 @@ static void do_state(struct net_device *dev,
}
break;
case CAN_STATE_BUS_OFF:
- dev_err(dev->dev.parent,
- "BUG! hardware recovered automatically from BUS_OFF\n");
+ netdev_err(dev, "BUG! "
+ "hardware recovered automatically from BUS_OFF\n");
break;
default:
break;
@@ -432,7 +428,7 @@ static void do_state(struct net_device *dev,
/* process state changes depending on the new state */
switch (new_state) {
case CAN_STATE_ERROR_ACTIVE:
- dev_dbg(dev->dev.parent, "Error Active\n");
+ netdev_dbg(dev, "Error Active\n");
cf->can_id |= CAN_ERR_PROT;
cf->data[2] = CAN_ERR_PROT_ACTIVE;
break;
@@ -614,7 +610,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
/* transmission complete interrupt */
if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) {
- /* tx_bytes is incremented in flexcan_start_xmit */
+ stats->tx_bytes += can_get_echo_skb(dev, 0);
stats->tx_packets++;
flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
netif_wake_queue(dev);
@@ -653,12 +649,12 @@ static void flexcan_set_bittiming(struct net_device *dev)
if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
reg |= FLEXCAN_CTRL_SMP;
- dev_info(dev->dev.parent, "writing ctrl=0x%08x\n", reg);
+ netdev_info(dev, "writing ctrl=0x%08x\n", reg);
flexcan_write(reg, &regs->ctrl);
/* print chip status */
- dev_dbg(dev->dev.parent, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
- flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
+ netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
+ flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
}
/*
@@ -684,9 +680,8 @@ static int flexcan_chip_start(struct net_device *dev)
reg_mcr = flexcan_read(&regs->mcr);
if (reg_mcr & FLEXCAN_MCR_SOFTRST) {
- dev_err(dev->dev.parent,
- "Failed to softreset can module (mcr=0x%08x)\n",
- reg_mcr);
+ netdev_err(dev, "Failed to softreset can module (mcr=0x%08x)\n",
+ reg_mcr);
err = -ENODEV;
goto out;
}
@@ -702,13 +697,14 @@ static int flexcan_chip_start(struct net_device *dev)
* only supervisor access
* enable warning int
* choose format C
+ * disable local echo
*
*/
reg_mcr = flexcan_read(&regs->mcr);
reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
- FLEXCAN_MCR_IDAM_C;
- dev_dbg(dev->dev.parent, "%s: writing mcr=0x%08x", __func__, reg_mcr);
+ FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS;
+ netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
flexcan_write(reg_mcr, &regs->mcr);
/*
@@ -734,7 +730,7 @@ static int flexcan_chip_start(struct net_device *dev)
/* save for later use */
priv->reg_ctrl_default = reg_ctrl;
- dev_dbg(dev->dev.parent, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
+ netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
flexcan_write(reg_ctrl, &regs->ctrl);
for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) {
@@ -766,8 +762,8 @@ static int flexcan_chip_start(struct net_device *dev)
flexcan_write(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
/* print chip status */
- dev_dbg(dev->dev.parent, "%s: reading mcr=0x%08x ctrl=0x%08x\n",
- __func__, flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
+ netdev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__,
+ flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
return 0;
@@ -905,8 +901,7 @@ static int __devinit register_flexcandev(struct net_device *dev)
*/
reg = flexcan_read(&regs->mcr);
if (!(reg & FLEXCAN_MCR_FEN)) {
- dev_err(dev->dev.parent,
- "Could not enable RX FIFO, unsupported core\n");
+ netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
err = -ENODEV;
goto out;
}
@@ -975,7 +970,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
goto failed_map;
}
- dev = alloc_candev(sizeof(struct flexcan_priv), 0);
+ dev = alloc_candev(sizeof(struct flexcan_priv), 1);
if (!dev) {
err = -ENOMEM;
goto failed_alloc;
@@ -983,7 +978,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
dev->netdev_ops = &flexcan_netdev_ops;
dev->irq = irq;
- dev->flags |= IFF_ECHO; /* we support local echo in hardware */
+ dev->flags |= IFF_ECHO;
priv = netdev_priv(dev);
priv->can.clock.freq = clock_freq;
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 330140ee266d..346785c56a25 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -712,8 +712,7 @@ static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
frame->data[1] = data1;
netif_rx_ni(skb);
} else {
- dev_err(&net->dev,
- "cannot allocate error skb\n");
+ netdev_err(net, "cannot allocate error skb\n");
}
}
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 1c82dd8b896e..41a2a2dda7ea 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -95,9 +95,9 @@ static int mscan_set_mode(struct net_device *dev, u8 mode)
* any, at once.
*/
if (i >= MSCAN_SET_MODE_RETRIES)
- dev_dbg(dev->dev.parent,
- "device failed to enter sleep mode. "
- "We proceed anyhow.\n");
+ netdev_dbg(dev,
+ "device failed to enter sleep mode. "
+ "We proceed anyhow.\n");
else
priv->can.state = CAN_STATE_SLEEPING;
}
@@ -213,7 +213,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
switch (hweight8(i)) {
case 0:
netif_stop_queue(dev);
- dev_err(dev->dev.parent, "Tx Ring full when queue awake!\n");
+ netdev_err(dev, "Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
case 1:
/*
@@ -352,7 +352,7 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
struct net_device_stats *stats = &dev->stats;
enum can_state old_state;
- dev_dbg(dev->dev.parent, "error interrupt (canrflg=%#x)\n", canrflg);
+ netdev_dbg(dev, "error interrupt (canrflg=%#x)\n", canrflg);
frame->can_id = CAN_ERR_FLAG;
if (canrflg & MSCAN_OVRIF) {
@@ -427,7 +427,7 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
skb = alloc_can_skb(dev, &frame);
if (!skb) {
if (printk_ratelimit())
- dev_notice(dev->dev.parent, "packet dropped\n");
+ netdev_notice(dev, "packet dropped\n");
stats->rx_dropped++;
out_8(&regs->canrflg, canrflg);
continue;
@@ -551,8 +551,7 @@ static int mscan_do_set_bittiming(struct net_device *dev)
BTR1_SET_TSEG2(bt->phase_seg2) |
BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES));
- dev_info(dev->dev.parent, "setting BTR0=0x%02x BTR1=0x%02x\n",
- btr0, btr1);
+ netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
out_8(&regs->canbtr0, btr0);
out_8(&regs->canbtr1, btr1);
@@ -560,6 +559,18 @@ static int mscan_do_set_bittiming(struct net_device *dev)
return 0;
}
+static int mscan_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct mscan_priv *priv = netdev_priv(dev);
+ struct mscan_regs __iomem *regs = priv->reg_base;
+
+ bec->txerr = in_8(&regs->cantxerr);
+ bec->rxerr = in_8(&regs->canrxerr);
+
+ return 0;
+}
+
static int mscan_open(struct net_device *dev)
{
int ret;
@@ -575,7 +586,7 @@ static int mscan_open(struct net_device *dev)
ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev);
if (ret < 0) {
- dev_err(dev->dev.parent, "failed to attach interrupt\n");
+ netdev_err(dev, "failed to attach interrupt\n");
goto exit_napi_disable;
}
@@ -639,8 +650,10 @@ int register_mscandev(struct net_device *dev, int mscan_clksrc)
else
ctl1 &= ~MSCAN_CLKSRC;
- if (priv->type == MSCAN_TYPE_MPC5121)
+ if (priv->type == MSCAN_TYPE_MPC5121) {
+ priv->can.do_get_berr_counter = mscan_get_berr_counter;
ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */
+ }
ctl1 |= MSCAN_CANE;
out_8(&regs->canctl1, ctl1);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 6edc25e0dd15..2bb215e00eb1 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2010 LAPIS SEMICONDUCTOR CO., LTD.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 36e9d594069d..b60d6c5f29a0 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -43,12 +43,33 @@ config CAN_EMS_PCI
CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
(http://www.ems-wuensche.de).
+config CAN_PEAK_PCMCIA
+ tristate "PEAK PCAN-PC Card"
+ depends on PCMCIA
+ ---help---
+ This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
+ from PEAK-System (http://www.peak-system.com). To compile this
+ driver as a module, choose M here: the module will be called
+ peak_pcmcia.
+
config CAN_PEAK_PCI
- tristate "PEAK PCAN PCI/PCIe Cards"
+ tristate "PEAK PCAN-PCI/PCIe/miniPCI Cards"
depends on PCI
---help---
- This driver is for the PCAN PCI/PCIe cards (1, 2, 3 or 4 channels)
- from PEAK Systems (http://www.peak-system.com).
+ This driver is for the PCAN-PCI/PCIe/miniPCI cards
+ (1, 2, 3 or 4 channels) from PEAK-System Technik
+ (http://www.peak-system.com).
+
+config CAN_PEAK_PCIEC
+ bool "PEAK PCAN-ExpressCard Cards"
+ depends on CAN_PEAK_PCI
+ select I2C
+ select I2C_ALGOBIT
+ default y
+ ---help---
+ Say Y here if you want to use a PCAN-ExpressCard from PEAK-System
+ Technik. This will also automatically select the I2C and I2C_ALGOBIT
+ configuration options.
config CAN_KVASER_PCI
tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
@@ -71,6 +92,7 @@ config CAN_PLX_PCI
- esd CAN-PCIe/2000
- Marathon CAN-bus-PCI card (http://www.marathon.ru/)
- TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
+ - IXXAT Automation PC-I 04/PCI card (http://www.ixxat.com/)
config CAN_TSCAN1
tristate "TS-CAN1 PC104 boards"
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index 0604f240c8b1..b3d05cbfec36 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
+obj-$(CONFIG_CAN_PEAK_PCMCIA) += peak_pcmcia.o
obj-$(CONFIG_CAN_PEAK_PCI) += peak_pci.o
obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
obj-$(CONFIG_CAN_TSCAN1) += tscan1.o
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 214795945bc4..5f92b865f64b 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Derived from the PCAN project file driver/src/pcan_pci.c:
*
@@ -13,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
@@ -26,22 +23,26 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include "sja1000.h"
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI/PCIe cards");
-MODULE_SUPPORTED_DEVICE("PEAK PCAN PCI/PCIe CAN card");
+MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards");
+MODULE_SUPPORTED_DEVICE("PEAK PCAN PCI/PCIe/PCIeC miniPCI CAN cards");
MODULE_LICENSE("GPL v2");
#define DRV_NAME "peak_pci"
+struct peak_pciec_card;
struct peak_pci_chan {
void __iomem *cfg_base; /* Common for all channels */
struct net_device *prev_dev; /* Chain of network devices */
u16 icr_mask; /* Interrupt mask for fast ack */
+ struct peak_pciec_card *pciec_card; /* only for PCIeC LEDs */
};
#define PEAK_PCI_CAN_CLOCK (16000000 / 2)
@@ -61,16 +62,464 @@ struct peak_pci_chan {
#define PEAK_PCI_VENDOR_ID 0x001C /* The PCI device and vendor IDs */
#define PEAK_PCI_DEVICE_ID 0x0001 /* for PCI/PCIe slot cards */
+#define PEAK_PCIEC_DEVICE_ID 0x0002 /* for ExpressCard slot cards */
+#define PEAK_PCIE_DEVICE_ID 0x0003 /* for nextgen PCIe slot cards */
+#define PEAK_MPCI_DEVICE_ID 0x0008 /* The miniPCI slot cards */
+
+#define PEAK_PCI_CHAN_MAX 4
-static const u16 peak_pci_icr_masks[] = {0x02, 0x01, 0x40, 0x80};
+static const u16 peak_pci_icr_masks[PEAK_PCI_CHAN_MAX] = {
+ 0x02, 0x01, 0x40, 0x80
+};
static DEFINE_PCI_DEVICE_TABLE(peak_pci_tbl) = {
{PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+#ifdef CONFIG_CAN_PEAK_PCIEC
+ {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+#endif
{0,}
};
MODULE_DEVICE_TABLE(pci, peak_pci_tbl);
+#ifdef CONFIG_CAN_PEAK_PCIEC
+/*
+ * PCAN-ExpressCard needs I2C bit-banging configuration option.
+ */
+
+/* GPIOICR byte access offsets */
+#define PITA_GPOUT 0x18 /* GPx output value */
+#define PITA_GPIN 0x19 /* GPx input value */
+#define PITA_GPOEN 0x1A /* configure GPx as output pin */
+
+/* I2C GP bits */
+#define PITA_GPIN_SCL 0x01 /* Serial Clock Line */
+#define PITA_GPIN_SDA 0x04 /* Serial DAta line */
+
+#define PCA9553_1_SLAVEADDR (0xC4 >> 1)
+
+/* PCA9553 LS0 fields values */
+enum {
+ PCA9553_LOW,
+ PCA9553_HIGHZ,
+ PCA9553_PWM0,
+ PCA9553_PWM1
+};
+
+/* LEDs control */
+#define PCA9553_ON PCA9553_LOW
+#define PCA9553_OFF PCA9553_HIGHZ
+#define PCA9553_SLOW PCA9553_PWM0
+#define PCA9553_FAST PCA9553_PWM1
+
+#define PCA9553_LED(c) (1 << (c))
+#define PCA9553_LED_STATE(s, c) ((s) << ((c) << 1))
+
+#define PCA9553_LED_ON(c) PCA9553_LED_STATE(PCA9553_ON, c)
+#define PCA9553_LED_OFF(c) PCA9553_LED_STATE(PCA9553_OFF, c)
+#define PCA9553_LED_SLOW(c) PCA9553_LED_STATE(PCA9553_SLOW, c)
+#define PCA9553_LED_FAST(c) PCA9553_LED_STATE(PCA9553_FAST, c)
+#define PCA9553_LED_MASK(c) PCA9553_LED_STATE(0x03, c)
+
+#define PCA9553_LED_OFF_ALL (PCA9553_LED_OFF(0) | PCA9553_LED_OFF(1))
+
+#define PCA9553_LS0_INIT 0x40 /* initial value (different from 0x00) */
+
+struct peak_pciec_chan {
+ struct net_device *netdev;
+ unsigned long prev_rx_bytes;
+ unsigned long prev_tx_bytes;
+};
+
+struct peak_pciec_card {
+ void __iomem *cfg_base; /* Common for all channels */
+ void __iomem *reg_base; /* first channel base address */
+ u8 led_cache; /* leds state cache */
+
+ /* PCIExpressCard i2c data */
+ struct i2c_algo_bit_data i2c_bit;
+ struct i2c_adapter led_chip;
+ struct delayed_work led_work; /* led delayed work */
+ int chan_count;
+ struct peak_pciec_chan channel[PEAK_PCI_CHAN_MAX];
+};
+
+/* "normal" pci register write callback is overloaded for leds control */
+static void peak_pci_write_reg(const struct sja1000_priv *priv,
+ int port, u8 val);
+
+static inline void pita_set_scl_highz(struct peak_pciec_card *card)
+{
+ u8 gp_outen = readb(card->cfg_base + PITA_GPOEN) & ~PITA_GPIN_SCL;
+ writeb(gp_outen, card->cfg_base + PITA_GPOEN);
+}
+
+static inline void pita_set_sda_highz(struct peak_pciec_card *card)
+{
+ u8 gp_outen = readb(card->cfg_base + PITA_GPOEN) & ~PITA_GPIN_SDA;
+ writeb(gp_outen, card->cfg_base + PITA_GPOEN);
+}
+
+static void peak_pciec_init_pita_gpio(struct peak_pciec_card *card)
+{
+ /* raise SCL & SDA GPIOs to high-Z */
+ pita_set_scl_highz(card);
+ pita_set_sda_highz(card);
+}
+
+static void pita_setsda(void *data, int state)
+{
+ struct peak_pciec_card *card = (struct peak_pciec_card *)data;
+ u8 gp_out, gp_outen;
+
+ /* set output sda always to 0 */
+ gp_out = readb(card->cfg_base + PITA_GPOUT) & ~PITA_GPIN_SDA;
+ writeb(gp_out, card->cfg_base + PITA_GPOUT);
+
+ /* control output sda with GPOEN */
+ gp_outen = readb(card->cfg_base + PITA_GPOEN);
+ if (state)
+ gp_outen &= ~PITA_GPIN_SDA;
+ else
+ gp_outen |= PITA_GPIN_SDA;
+
+ writeb(gp_outen, card->cfg_base + PITA_GPOEN);
+}
+
+static void pita_setscl(void *data, int state)
+{
+ struct peak_pciec_card *card = (struct peak_pciec_card *)data;
+ u8 gp_out, gp_outen;
+
+ /* set output scl always to 0 */
+ gp_out = readb(card->cfg_base + PITA_GPOUT) & ~PITA_GPIN_SCL;
+ writeb(gp_out, card->cfg_base + PITA_GPOUT);
+
+ /* control output scl with GPOEN */
+ gp_outen = readb(card->cfg_base + PITA_GPOEN);
+ if (state)
+ gp_outen &= ~PITA_GPIN_SCL;
+ else
+ gp_outen |= PITA_GPIN_SCL;
+
+ writeb(gp_outen, card->cfg_base + PITA_GPOEN);
+}
+
+static int pita_getsda(void *data)
+{
+ struct peak_pciec_card *card = (struct peak_pciec_card *)data;
+
+ /* set tristate */
+ pita_set_sda_highz(card);
+
+ return (readb(card->cfg_base + PITA_GPIN) & PITA_GPIN_SDA) ? 1 : 0;
+}
+
+static int pita_getscl(void *data)
+{
+ struct peak_pciec_card *card = (struct peak_pciec_card *)data;
+
+ /* set tristate */
+ pita_set_scl_highz(card);
+
+ return (readb(card->cfg_base + PITA_GPIN) & PITA_GPIN_SCL) ? 1 : 0;
+}
+
+/*
+ * write commands to the LED chip through the I2C bus of the PCAN-PCIeC
+ */
+static int peak_pciec_write_pca9553(struct peak_pciec_card *card,
+ u8 offset, u8 data)
+{
+ u8 buffer[2] = {
+ offset,
+ data
+ };
+ struct i2c_msg msg = {
+ .addr = PCA9553_1_SLAVEADDR,
+ .len = 2,
+ .buf = buffer,
+ };
+ int ret;
+
+ /* cache led mask */
+ if ((offset == 5) && (data == card->led_cache))
+ return 0;
+
+ ret = i2c_transfer(&card->led_chip, &msg, 1);
+ if (ret < 0)
+ return ret;
+
+ if (offset == 5)
+ card->led_cache = data;
+
+ return 0;
+}
+
+/*
+ * delayed work callback used to control the LEDs
+ */
+static void peak_pciec_led_work(struct work_struct *work)
+{
+ struct peak_pciec_card *card =
+ container_of(work, struct peak_pciec_card, led_work.work);
+ struct net_device *netdev;
+ u8 new_led = card->led_cache;
+ int i, up_count = 0;
+
+ /* first check what is to do */
+ for (i = 0; i < card->chan_count; i++) {
+ /* default is: not configured */
+ new_led &= ~PCA9553_LED_MASK(i);
+ new_led |= PCA9553_LED_ON(i);
+
+ netdev = card->channel[i].netdev;
+ if (!netdev || !(netdev->flags & IFF_UP))
+ continue;
+
+ up_count++;
+
+ /* no activity (but configured) */
+ new_led &= ~PCA9553_LED_MASK(i);
+ new_led |= PCA9553_LED_SLOW(i);
+
+ /* if bytes counters changed, set fast blinking led */
+ if (netdev->stats.rx_bytes != card->channel[i].prev_rx_bytes) {
+ card->channel[i].prev_rx_bytes = netdev->stats.rx_bytes;
+ new_led &= ~PCA9553_LED_MASK(i);
+ new_led |= PCA9553_LED_FAST(i);
+ }
+ if (netdev->stats.tx_bytes != card->channel[i].prev_tx_bytes) {
+ card->channel[i].prev_tx_bytes = netdev->stats.tx_bytes;
+ new_led &= ~PCA9553_LED_MASK(i);
+ new_led |= PCA9553_LED_FAST(i);
+ }
+ }
+
+ /* check if LS0 settings changed, only update i2c if so */
+ peak_pciec_write_pca9553(card, 5, new_led);
+
+ /* restart timer (except if no more configured channels) */
+ if (up_count)
+ schedule_delayed_work(&card->led_work, HZ);
+}
+
+/*
+ * set LEDs blinking state
+ */
+static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s)
+{
+ u8 new_led = card->led_cache;
+ int i;
+
+ /* first check what is to do */
+ for (i = 0; i < card->chan_count; i++)
+ if (led_mask & PCA9553_LED(i)) {
+ new_led &= ~PCA9553_LED_MASK(i);
+ new_led |= PCA9553_LED_STATE(s, i);
+ }
+
+ /* check if LS0 settings changed, only update i2c if so */
+ peak_pciec_write_pca9553(card, 5, new_led);
+}
+
+/*
+ * start one second delayed work to control LEDs
+ */
+static void peak_pciec_start_led_work(struct peak_pciec_card *card)
+{
+ if (!delayed_work_pending(&card->led_work))
+ schedule_delayed_work(&card->led_work, HZ);
+}
+
+/*
+ * stop LEDs delayed work
+ */
+static void peak_pciec_stop_led_work(struct peak_pciec_card *card)
+{
+ cancel_delayed_work_sync(&card->led_work);
+}
+
+/*
+ * initialize the PCA9553 4-bit I2C-bus LED chip
+ */
+static int peak_pciec_init_leds(struct peak_pciec_card *card)
+{
+ int err;
+
+ /* prescaler for frequency 0: "SLOW" = 1 Hz = "44" */
+ err = peak_pciec_write_pca9553(card, 1, 44 / 1);
+ if (err)
+ return err;
+
+ /* duty cycle 0: 50% */
+ err = peak_pciec_write_pca9553(card, 2, 0x80);
+ if (err)
+ return err;
+
+ /* prescaler for frequency 1: "FAST" = 5 Hz */
+ err = peak_pciec_write_pca9553(card, 3, 44 / 5);
+ if (err)
+ return err;
+
+ /* duty cycle 1: 50% */
+ err = peak_pciec_write_pca9553(card, 4, 0x80);
+ if (err)
+ return err;
+
+ /* switch LEDs to initial state */
+ return peak_pciec_write_pca9553(card, 5, PCA9553_LS0_INIT);
+}
+
+/*
+ * restore LEDs state to off on exit (peak_pciec_leds_exit)
+ */
+static void peak_pciec_leds_exit(struct peak_pciec_card *card)
+{
+ /* switch LEDs to off */
+ peak_pciec_write_pca9553(card, 5, PCA9553_LED_OFF_ALL);
+}
+
+/*
+ * normal write sja1000 register method overloaded to catch when controller
+ * is started or stopped, to control leds
+ */
+static void peak_pciec_write_reg(const struct sja1000_priv *priv,
+ int port, u8 val)
+{
+ struct peak_pci_chan *chan = priv->priv;
+ struct peak_pciec_card *card = chan->pciec_card;
+ int c = (priv->reg_base - card->reg_base) / PEAK_PCI_CHAN_SIZE;
+
+ /* sja1000 register changes control the leds state */
+ if (port == REG_MOD)
+ switch (val) {
+ case MOD_RM:
+ /* Reset Mode: set led on */
+ peak_pciec_set_leds(card, PCA9553_LED(c), PCA9553_ON);
+ break;
+ case 0x00:
+ /* Normal Mode: led slow blinking and start led timer */
+ peak_pciec_set_leds(card, PCA9553_LED(c), PCA9553_SLOW);
+ peak_pciec_start_led_work(card);
+ break;
+ default:
+ break;
+ }
+
+ /* call base function */
+ peak_pci_write_reg(priv, port, val);
+}
+
+static struct i2c_algo_bit_data peak_pciec_i2c_bit_ops = {
+ .setsda = pita_setsda,
+ .setscl = pita_setscl,
+ .getsda = pita_getsda,
+ .getscl = pita_getscl,
+ .udelay = 10,
+ .timeout = HZ,
+};
+
+static int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+ struct peak_pci_chan *chan = priv->priv;
+ struct peak_pciec_card *card;
+ int err;
+
+ /* copy i2c object address from 1st channel */
+ if (chan->prev_dev) {
+ struct sja1000_priv *prev_priv = netdev_priv(chan->prev_dev);
+ struct peak_pci_chan *prev_chan = prev_priv->priv;
+
+ card = prev_chan->pciec_card;
+ if (!card)
+ return -ENODEV;
+
+ /* channel is the first one: do the init part */
+ } else {
+ /* create the bit banging I2C adapter structure */
+ card = kzalloc(sizeof(struct peak_pciec_card), GFP_KERNEL);
+ if (!card) {
+ dev_err(&pdev->dev,
+ "failed allocating memory for i2c chip\n");
+ return -ENOMEM;
+ }
+
+ card->cfg_base = chan->cfg_base;
+ card->reg_base = priv->reg_base;
+
+ card->led_chip.owner = THIS_MODULE;
+ card->led_chip.dev.parent = &pdev->dev;
+ card->led_chip.algo_data = &card->i2c_bit;
+ strncpy(card->led_chip.name, "peak_i2c",
+ sizeof(card->led_chip.name));
+
+ card->i2c_bit = peak_pciec_i2c_bit_ops;
+ card->i2c_bit.udelay = 10;
+ card->i2c_bit.timeout = HZ;
+ card->i2c_bit.data = card;
+
+ peak_pciec_init_pita_gpio(card);
+
+ err = i2c_bit_add_bus(&card->led_chip);
+ if (err) {
+ dev_err(&pdev->dev, "i2c init failed\n");
+ goto pciec_init_err_1;
+ }
+
+ err = peak_pciec_init_leds(card);
+ if (err) {
+ dev_err(&pdev->dev, "leds hardware init failed\n");
+ goto pciec_init_err_2;
+ }
+
+ INIT_DELAYED_WORK(&card->led_work, peak_pciec_led_work);
+ /* PCAN-ExpressCard needs its own callback for leds */
+ priv->write_reg = peak_pciec_write_reg;
+ }
+
+ chan->pciec_card = card;
+ card->channel[card->chan_count++].netdev = dev;
+
+ return 0;
+
+pciec_init_err_2:
+ i2c_del_adapter(&card->led_chip);
+
+pciec_init_err_1:
+ peak_pciec_init_pita_gpio(card);
+ kfree(card);
+
+ return err;
+}
+
+static void peak_pciec_remove(struct peak_pciec_card *card)
+{
+ peak_pciec_stop_led_work(card);
+ peak_pciec_leds_exit(card);
+ i2c_del_adapter(&card->led_chip);
+ peak_pciec_init_pita_gpio(card);
+ kfree(card);
+}
+
+#else /* CONFIG_CAN_PEAK_PCIEC */
+
+/*
+ * Placebo functions when PCAN-ExpressCard support is not selected
+ */
+static inline int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev)
+{
+ return -ENODEV;
+}
+
+static inline void peak_pciec_remove(struct peak_pciec_card *card)
+{
+}
+#endif /* CONFIG_CAN_PEAK_PCIEC */
+
static u8 peak_pci_read_reg(const struct sja1000_priv *priv, int port)
{
return readb(priv->reg_base + (port << 2));
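A side note on the PCA9553 macros added above: each channel LED gets a 2-bit selector in the chip's LS0 register (the register written at offset 5 in peak_pciec_write_pca9553()). A standalone check of the encoding, repeating the definitions from the hunk:

#include <assert.h>

enum { PCA9553_LOW, PCA9553_HIGHZ, PCA9553_PWM0, PCA9553_PWM1 };
#define PCA9553_LED_STATE(s, c)	((s) << ((c) << 1))

int main(void)
{
	/* channel 0 occupies bits 1:0 of LS0, channel 1 bits 3:2 */
	assert(PCA9553_LED_STATE(PCA9553_HIGHZ, 0) == 0x01);	/* LED0 off (high-Z) */
	assert(PCA9553_LED_STATE(PCA9553_HIGHZ, 1) == 0x04);	/* LED1 off (high-Z) */
	assert(PCA9553_LED_STATE(PCA9553_PWM1, 1) == 0x0c);	/* LED1 fast blink */
	return 0;
}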
@@ -188,17 +637,31 @@ static int __devinit peak_pci_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
+ /* Create chain of SJA1000 devices */
+ chan->prev_dev = pci_get_drvdata(pdev);
+ pci_set_drvdata(pdev, dev);
+
+ /*
+ * PCAN-ExpressCard needs some additional i2c init.
+ * This must be done *before* register_sja1000dev() but
+ * *after* devices linkage
+ */
+ if (pdev->device == PEAK_PCIEC_DEVICE_ID) {
+ err = peak_pciec_probe(pdev, dev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to probe device (err %d)\n",
+ err);
+ goto failure_free_dev;
+ }
+ }
+
err = register_sja1000dev(dev);
if (err) {
dev_err(&pdev->dev, "failed to register device\n");
- free_sja1000dev(dev);
- goto failure_remove_channels;
+ goto failure_free_dev;
}
- /* Create chain of SJA1000 devices */
- chan->prev_dev = pci_get_drvdata(pdev);
- pci_set_drvdata(pdev, dev);
-
dev_info(&pdev->dev,
"%s at reg_base=0x%p cfg_base=0x%p irq=%d\n",
dev->name, priv->reg_base, chan->cfg_base, dev->irq);
@@ -209,10 +672,15 @@ static int __devinit peak_pci_probe(struct pci_dev *pdev,
return 0;
+failure_free_dev:
+ pci_set_drvdata(pdev, chan->prev_dev);
+ free_sja1000dev(dev);
+
failure_remove_channels:
/* Disable interrupts */
writew(0x0, cfg_base + PITA_ICR + 2);
+ chan = NULL;
for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
unregister_sja1000dev(dev);
free_sja1000dev(dev);
@@ -220,6 +688,10 @@ failure_remove_channels:
chan = priv->priv;
}
+ /* free any PCIeC resources too */
+ if (chan && chan->pciec_card)
+ peak_pciec_remove(chan->pciec_card);
+
pci_iounmap(pdev, reg_base);
failure_unmap_cfg_base:
@@ -251,8 +723,13 @@ static void __devexit peak_pci_remove(struct pci_dev *pdev)
unregister_sja1000dev(dev);
free_sja1000dev(dev);
dev = chan->prev_dev;
- if (!dev)
+
+ if (!dev) {
+ /* do that only for first channel */
+ if (chan->pciec_card)
+ peak_pciec_remove(chan->pciec_card);
break;
+ }
priv = netdev_priv(dev);
chan = priv->priv;
}
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
new file mode 100644
index 000000000000..ec6bd9d1b2af
--- /dev/null
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -0,0 +1,753 @@
+/*
+ * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ *
+ * CAN driver for PEAK-System PCAN-PC Card
+ * Derived from the PCAN project file driver/src/pcan_pccard.c
+ * Copyright (C) 2006-2010 PEAK System-Technik GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include "sja1000.h"
+
+MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_DESCRIPTION("CAN driver for PEAK-System PCAN-PC Cards");
+MODULE_LICENSE("GPL v2");
+MODULE_SUPPORTED_DEVICE("PEAK PCAN-PC Card");
+
+/* PEAK-System PCMCIA driver name */
+#define PCC_NAME "peak_pcmcia"
+
+#define PCC_CHAN_MAX 2
+
+#define PCC_CAN_CLOCK (16000000 / 2)
+
+#define PCC_MANF_ID 0x0377
+#define PCC_CARD_ID 0x0001
+
+#define PCC_CHAN_SIZE 0x20
+#define PCC_CHAN_OFF(c) ((c) * PCC_CHAN_SIZE)
+#define PCC_COMN_OFF (PCC_CHAN_OFF(PCC_CHAN_MAX))
+#define PCC_COMN_SIZE 0x40
+
+/* common area registers */
+#define PCC_CCR 0x00
+#define PCC_CSR 0x02
+#define PCC_CPR 0x04
+#define PCC_SPI_DIR 0x06
+#define PCC_SPI_DOR 0x08
+#define PCC_SPI_ADR 0x0a
+#define PCC_SPI_IR 0x0c
+#define PCC_FW_MAJOR 0x10
+#define PCC_FW_MINOR 0x12
+
+/* CCR bits */
+#define PCC_CCR_CLK_16 0x00
+#define PCC_CCR_CLK_10 0x01
+#define PCC_CCR_CLK_21 0x02
+#define PCC_CCR_CLK_8 0x03
+#define PCC_CCR_CLK_MASK PCC_CCR_CLK_8
+
+#define PCC_CCR_RST_CHAN(c) (0x01 << ((c) + 2))
+#define PCC_CCR_RST_ALL (PCC_CCR_RST_CHAN(0) | PCC_CCR_RST_CHAN(1))
+#define PCC_CCR_RST_MASK PCC_CCR_RST_ALL
+
+/* led selection bits */
+#define PCC_LED(c) (1 << (c))
+#define PCC_LED_ALL (PCC_LED(0) | PCC_LED(1))
+
+/* led state value */
+#define PCC_LED_ON 0x00
+#define PCC_LED_FAST 0x01
+#define PCC_LED_SLOW 0x02
+#define PCC_LED_OFF 0x03
+
+#define PCC_CCR_LED_CHAN(s, c) ((s) << (((c) + 2) << 1))
+
+#define PCC_CCR_LED_ON_CHAN(c) PCC_CCR_LED_CHAN(PCC_LED_ON, c)
+#define PCC_CCR_LED_FAST_CHAN(c) PCC_CCR_LED_CHAN(PCC_LED_FAST, c)
+#define PCC_CCR_LED_SLOW_CHAN(c) PCC_CCR_LED_CHAN(PCC_LED_SLOW, c)
+#define PCC_CCR_LED_OFF_CHAN(c) PCC_CCR_LED_CHAN(PCC_LED_OFF, c)
+#define PCC_CCR_LED_MASK_CHAN(c) PCC_CCR_LED_OFF_CHAN(c)
+#define PCC_CCR_LED_OFF_ALL (PCC_CCR_LED_OFF_CHAN(0) | \
+ PCC_CCR_LED_OFF_CHAN(1))
+#define PCC_CCR_LED_MASK PCC_CCR_LED_OFF_ALL
+
+#define PCC_CCR_INIT (PCC_CCR_CLK_16 | PCC_CCR_RST_ALL | PCC_CCR_LED_OFF_ALL)
+
+/* CSR bits */
+#define PCC_CSR_SPI_BUSY 0x04
+
+/* time to wait while the SPI engine is busy (prevents an infinite loop) */
+#define PCC_SPI_MAX_BUSY_WAIT_MS 3
+
+/* max count of reads of the SPI status register while waiting for a change */
+/* (prevents an infinite loop) */
+#define PCC_WRITE_MAX_LOOP 1000
+
+/* max number of interrupts handled by the isr in one shot (prevents an infinite loop) */
+#define PCC_ISR_MAX_LOOP 10
+
+/* EEPROM chip instruction set */
+/* note: EEPROM Read/Write instructions include A8 bit */
+#define PCC_EEP_WRITE(a) (0x02 | (((a) & 0x100) >> 5))
+#define PCC_EEP_READ(a) (0x03 | (((a) & 0x100) >> 5))
+#define PCC_EEP_WRDI 0x04 /* EEPROM Write Disable */
+#define PCC_EEP_RDSR 0x05 /* EEPROM Read Status Register */
+#define PCC_EEP_WREN 0x06 /* EEPROM Write Enable */
+
+/* EEPROM Status Register bits */
+#define PCC_EEP_SR_WEN 0x02 /* EEPROM SR Write Enable bit */
+#define PCC_EEP_SR_WIP 0x01 /* EEPROM SR Write In Progress bit */
+
+/*
+ * The board configuration is probably the following:
+ * RX1 is connected to ground.
+ * TX1 is not connected.
+ * CLKO is not connected.
+ * Setting the OCR register to 0xDA is a good idea.
+ * This means normal output mode, push-pull and the correct polarity.
+ */
+#define PCC_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
+
+/*
+ * In the CDR register, you should set CBP to 1.
+ * You will probably also want to set the clock divider value to 7
+ * (meaning direct oscillator output) because the second SJA1000 chip
+ * is driven by the first one's CLKOUT output.
+ */
+#define PCC_CDR (CDR_CBP | CDR_CLKOUT_MASK)
+
+struct pcan_channel {
+ struct net_device *netdev;
+ unsigned long prev_rx_bytes;
+ unsigned long prev_tx_bytes;
+};
+
+/* PCAN-PC Card private structure */
+struct pcan_pccard {
+ struct pcmcia_device *pdev;
+ int chan_count;
+ struct pcan_channel channel[PCC_CHAN_MAX];
+ u8 ccr;
+ u8 fw_major;
+ u8 fw_minor;
+ void __iomem *ioport_addr;
+ struct timer_list led_timer;
+};
+
+static struct pcmcia_device_id pcan_table[] = {
+ PCMCIA_DEVICE_MANF_CARD(PCC_MANF_ID, PCC_CARD_ID),
+ PCMCIA_DEVICE_NULL,
+};
+
+MODULE_DEVICE_TABLE(pcmcia, pcan_table);
+
+static void pcan_set_leds(struct pcan_pccard *card, u8 mask, u8 state);
+
+/*
+ * start timer which controls leds state
+ */
+static void pcan_start_led_timer(struct pcan_pccard *card)
+{
+ if (!timer_pending(&card->led_timer))
+ mod_timer(&card->led_timer, jiffies + HZ);
+}
+
+/*
+ * stop the timer which controls leds state
+ */
+static void pcan_stop_led_timer(struct pcan_pccard *card)
+{
+ del_timer_sync(&card->led_timer);
+}
+
+/*
+ * read a sja1000 register
+ */
+static u8 pcan_read_canreg(const struct sja1000_priv *priv, int port)
+{
+ return ioread8(priv->reg_base + port);
+}
+
+/*
+ * write a sja1000 register
+ */
+static void pcan_write_canreg(const struct sja1000_priv *priv, int port, u8 v)
+{
+ struct pcan_pccard *card = priv->priv;
+ int c = (priv->reg_base - card->ioport_addr) / PCC_CHAN_SIZE;
+
+ /* sja1000 register changes control the leds state */
+ if (port == REG_MOD)
+ switch (v) {
+ case MOD_RM:
+ /* Reset Mode: set led on */
+ pcan_set_leds(card, PCC_LED(c), PCC_LED_ON);
+ break;
+ case 0x00:
+ /* Normal Mode: led slow blinking and start led timer */
+ pcan_set_leds(card, PCC_LED(c), PCC_LED_SLOW);
+ pcan_start_led_timer(card);
+ break;
+ default:
+ break;
+ }
+
+ iowrite8(v, priv->reg_base + port);
+}
+
+/*
+ * read a register from the common area
+ */
+static u8 pcan_read_reg(struct pcan_pccard *card, int port)
+{
+ return ioread8(card->ioport_addr + PCC_COMN_OFF + port);
+}
+
+/*
+ * write a register into the common area
+ */
+static void pcan_write_reg(struct pcan_pccard *card, int port, u8 v)
+{
+ /* cache ccr value */
+ if (port == PCC_CCR) {
+ if (card->ccr == v)
+ return;
+ card->ccr = v;
+ }
+
+ iowrite8(v, card->ioport_addr + PCC_COMN_OFF + port);
+}
+
+/*
+ * check whether the card is present by checking its fw version numbers
+ * against values read at probing time.
+ */
+static inline int pcan_pccard_present(struct pcan_pccard *card)
+{
+ return ((pcan_read_reg(card, PCC_FW_MAJOR) == card->fw_major) &&
+ (pcan_read_reg(card, PCC_FW_MINOR) == card->fw_minor));
+}
+
+/*
+ * wait for SPI engine while it is busy
+ */
+static int pcan_wait_spi_busy(struct pcan_pccard *card)
+{
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(PCC_SPI_MAX_BUSY_WAIT_MS) + 1;
+
+ /* be sure to read status at least once after sleeping */
+ while (pcan_read_reg(card, PCC_CSR) & PCC_CSR_SPI_BUSY) {
+ if (time_after(jiffies, timeout))
+ return -EBUSY;
+ schedule();
+ }
+
+ return 0;
+}
+
+/*
+ * write data in device eeprom
+ */
+static int pcan_write_eeprom(struct pcan_pccard *card, u16 addr, u8 v)
+{
+ u8 status;
+ int err, i;
+
+ /* write instruction enabling write */
+ pcan_write_reg(card, PCC_SPI_IR, PCC_EEP_WREN);
+ err = pcan_wait_spi_busy(card);
+ if (err)
+ goto we_spi_err;
+
+ /* wait until write enabled */
+ for (i = 0; i < PCC_WRITE_MAX_LOOP; i++) {
+ /* write instruction reading the status register */
+ pcan_write_reg(card, PCC_SPI_IR, PCC_EEP_RDSR);
+ err = pcan_wait_spi_busy(card);
+ if (err)
+ goto we_spi_err;
+
+ /* get status register value and check write enable bit */
+ status = pcan_read_reg(card, PCC_SPI_DIR);
+ if (status & PCC_EEP_SR_WEN)
+ break;
+ }
+
+ if (i >= PCC_WRITE_MAX_LOOP) {
+ dev_err(&card->pdev->dev,
+ "stop waiting to be allowed to write in eeprom\n");
+ return -EIO;
+ }
+
+ /* set address and data */
+ pcan_write_reg(card, PCC_SPI_ADR, addr & 0xff);
+ pcan_write_reg(card, PCC_SPI_DOR, v);
+
+ /*
+ * write instruction with bit[3] set according to address value:
+ * if addr refers to upper half of the memory array: bit[3] = 1
+ */
+ pcan_write_reg(card, PCC_SPI_IR, PCC_EEP_WRITE(addr));
+ err = pcan_wait_spi_busy(card);
+ if (err)
+ goto we_spi_err;
+
+ /* wait while write in progress */
+ for (i = 0; i < PCC_WRITE_MAX_LOOP; i++) {
+ /* write instruction reading the status register */
+ pcan_write_reg(card, PCC_SPI_IR, PCC_EEP_RDSR);
+ err = pcan_wait_spi_busy(card);
+ if (err)
+ goto we_spi_err;
+
+ /* get status register value and check write in progress bit */
+ status = pcan_read_reg(card, PCC_SPI_DIR);
+ if (!(status & PCC_EEP_SR_WIP))
+ break;
+ }
+
+ if (i >= PCC_WRITE_MAX_LOOP) {
+ dev_err(&card->pdev->dev,
+ "stop waiting for write in eeprom to complete\n");
+ return -EIO;
+ }
+
+ /* write instruction disabling write */
+ pcan_write_reg(card, PCC_SPI_IR, PCC_EEP_WRDI);
+ err = pcan_wait_spi_busy(card);
+ if (err)
+ goto we_spi_err;
+
+ return 0;
+
+we_spi_err:
+ dev_err(&card->pdev->dev,
+ "stop waiting (spi engine always busy) err %d\n", err);
+
+ return err;
+}
+
+static void pcan_set_leds(struct pcan_pccard *card, u8 led_mask, u8 state)
+{
+ u8 ccr = card->ccr;
+ int i;
+
+ for (i = 0; i < card->chan_count; i++)
+ if (led_mask & PCC_LED(i)) {
+ /* clear corresponding led bits in ccr */
+ ccr &= ~PCC_CCR_LED_MASK_CHAN(i);
+ /* then set new bits */
+ ccr |= PCC_CCR_LED_CHAN(state, i);
+ }
+
+ /* real write only if something has changed in ccr */
+ pcan_write_reg(card, PCC_CCR, ccr);
+}
+
+/*
+ * enable/disable CAN connectors power
+ */
+static inline void pcan_set_can_power(struct pcan_pccard *card, int onoff)
+{
+ int err;
+
+ err = pcan_write_eeprom(card, 0, !!onoff);
+ if (err)
+ dev_err(&card->pdev->dev,
+ "failed setting power %s to can connectors (err %d)\n",
+ (onoff) ? "on" : "off", err);
+}
+
+/*
+ * set leds state according to channel activity
+ */
+static void pcan_led_timer(unsigned long arg)
+{
+ struct pcan_pccard *card = (struct pcan_pccard *)arg;
+ struct net_device *netdev;
+ int i, up_count = 0;
+ u8 ccr;
+
+ ccr = card->ccr;
+ for (i = 0; i < card->chan_count; i++) {
+ /* default is: not configured */
+ ccr &= ~PCC_CCR_LED_MASK_CHAN(i);
+ ccr |= PCC_CCR_LED_ON_CHAN(i);
+
+ netdev = card->channel[i].netdev;
+ if (!netdev || !(netdev->flags & IFF_UP))
+ continue;
+
+ up_count++;
+
+ /* no activity (but configured) */
+ ccr &= ~PCC_CCR_LED_MASK_CHAN(i);
+ ccr |= PCC_CCR_LED_SLOW_CHAN(i);
+
+ /* if bytes counters changed, set fast blinking led */
+ if (netdev->stats.rx_bytes != card->channel[i].prev_rx_bytes) {
+ card->channel[i].prev_rx_bytes = netdev->stats.rx_bytes;
+ ccr &= ~PCC_CCR_LED_MASK_CHAN(i);
+ ccr |= PCC_CCR_LED_FAST_CHAN(i);
+ }
+ if (netdev->stats.tx_bytes != card->channel[i].prev_tx_bytes) {
+ card->channel[i].prev_tx_bytes = netdev->stats.tx_bytes;
+ ccr &= ~PCC_CCR_LED_MASK_CHAN(i);
+ ccr |= PCC_CCR_LED_FAST_CHAN(i);
+ }
+ }
+
+ /* write the new leds state */
+ pcan_write_reg(card, PCC_CCR, ccr);
+
+ /* restart timer (except if no more configured channels) */
+ if (up_count)
+ mod_timer(&card->led_timer, jiffies + HZ);
+}
+
+/*
+ * interrupt service routine
+ */
+static irqreturn_t pcan_isr(int irq, void *dev_id)
+{
+ struct pcan_pccard *card = dev_id;
+ int irq_handled;
+
+ /* prevent an infinite loop */
+ for (irq_handled = 0; irq_handled < PCC_ISR_MAX_LOOP; irq_handled++) {
+ /* handle shared interrupt and next loop */
+ int nothing_to_handle = 1;
+ int i;
+
+ /* check interrupt for each channel */
+ for (i = 0; i < card->chan_count; i++) {
+ struct net_device *netdev;
+
+ /*
+ * check whether the card is present before calling
+ * sja1000_interrupt() to speed up hotplug detection
+ */
+ if (!pcan_pccard_present(card)) {
+ /* card unplugged during isr */
+ return IRQ_NONE;
+ }
+
+ /*
+ * should check whether all or SJA1000_MAX_IRQ
+ * interrupts have been handled: loop again to be sure.
+ */
+ netdev = card->channel[i].netdev;
+ if (netdev &&
+ sja1000_interrupt(irq, netdev) == IRQ_HANDLED)
+ nothing_to_handle = 0;
+ }
+
+ if (nothing_to_handle)
+ break;
+ }
+
+ return (irq_handled) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*
+ * free all resources used by the channels and switch off leds and can power
+ */
+static void pcan_free_channels(struct pcan_pccard *card)
+{
+ int i;
+ u8 led_mask = 0;
+
+ for (i = 0; i < card->chan_count; i++) {
+ struct net_device *netdev;
+ char name[IFNAMSIZ];
+
+ led_mask |= PCC_LED(i);
+
+ netdev = card->channel[i].netdev;
+ if (!netdev)
+ continue;
+
+ strncpy(name, netdev->name, IFNAMSIZ);
+
+ unregister_sja1000dev(netdev);
+
+ free_sja1000dev(netdev);
+
+ dev_info(&card->pdev->dev, "%s removed\n", name);
+ }
+
+ /* do it only if device not removed */
+ if (pcan_pccard_present(card)) {
+ pcan_set_leds(card, led_mask, PCC_LED_OFF);
+ pcan_set_can_power(card, 0);
+ }
+}
+
+/*
+ * check if a CAN controller is present at the specified location
+ */
+static inline int pcan_channel_present(struct sja1000_priv *priv)
+{
+ /* make sure SJA1000 is in reset mode */
+ pcan_write_canreg(priv, REG_MOD, 1);
+ pcan_write_canreg(priv, REG_CDR, CDR_PELICAN);
+
+ /* read reset-values */
+ if (pcan_read_canreg(priv, REG_CDR) == CDR_PELICAN)
+ return 1;
+
+ return 0;
+}
+
+static int pcan_add_channels(struct pcan_pccard *card)
+{
+ struct pcmcia_device *pdev = card->pdev;
+ int i, err = 0;
+ u8 ccr = PCC_CCR_INIT;
+
+ /* init common registers (reset channels and leds off) */
+ card->ccr = ~ccr;
+ pcan_write_reg(card, PCC_CCR, ccr);
+
+ /* wait 2ms before unresetting channels */
+ mdelay(2);
+
+ ccr &= ~PCC_CCR_RST_ALL;
+ pcan_write_reg(card, PCC_CCR, ccr);
+
+ /* create one network device per channel detected */
+ for (i = 0; i < ARRAY_SIZE(card->channel); i++) {
+ struct net_device *netdev;
+ struct sja1000_priv *priv;
+
+ netdev = alloc_sja1000dev(0);
+ if (!netdev) {
+ err = -ENOMEM;
+ break;
+ }
+
+ /* update linkages */
+ priv = netdev_priv(netdev);
+ priv->priv = card;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ priv->irq_flags = IRQF_SHARED;
+ netdev->irq = pdev->irq;
+ priv->reg_base = card->ioport_addr + PCC_CHAN_OFF(i);
+
+ /* check if channel is present */
+ if (!pcan_channel_present(priv)) {
+ dev_err(&pdev->dev, "channel %d not present\n", i);
+ free_sja1000dev(netdev);
+ continue;
+ }
+
+ priv->read_reg = pcan_read_canreg;
+ priv->write_reg = pcan_write_canreg;
+ priv->can.clock.freq = PCC_CAN_CLOCK;
+ priv->ocr = PCC_OCR;
+ priv->cdr = PCC_CDR;
+
+ /* Slave channels do not distribute the clock */
+ if (i > 0)
+ priv->cdr |= CDR_CLK_OFF;
+
+ priv->flags |= SJA1000_CUSTOM_IRQ_HANDLER;
+
+ /* register SJA1000 device */
+ err = register_sja1000dev(netdev);
+ if (err) {
+ free_sja1000dev(netdev);
+ continue;
+ }
+
+ card->channel[i].netdev = netdev;
+ card->chan_count++;
+
+ /* set corresponding led on in the new ccr */
+ ccr &= ~PCC_CCR_LED_OFF_CHAN(i);
+
+ dev_info(&pdev->dev,
+ "%s on channel %d at 0x%p irq %d\n",
+ netdev->name, i, priv->reg_base, pdev->irq);
+ }
+
+ /* write new ccr (change leds state) */
+ pcan_write_reg(card, PCC_CCR, ccr);
+
+ return err;
+}
+
+static int pcan_conf_check(struct pcmcia_device *pdev, void *priv_data)
+{
+ pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; /* only */
+ pdev->io_lines = 10;
+
+ /* This reserves IO space but doesn't actually enable it */
+ return pcmcia_request_io(pdev);
+}
+
+/*
+ * free all resources used by the device
+ */
+static void pcan_free(struct pcmcia_device *pdev)
+{
+ struct pcan_pccard *card = pdev->priv;
+
+ if (!card)
+ return;
+
+ free_irq(pdev->irq, card);
+ pcan_stop_led_timer(card);
+
+ pcan_free_channels(card);
+
+ ioport_unmap(card->ioport_addr);
+
+ kfree(card);
+ pdev->priv = NULL;
+}
+
+/*
+ * setup PCMCIA socket and probe for PEAK-System PC-CARD
+ */
+static int __devinit pcan_probe(struct pcmcia_device *pdev)
+{
+ struct pcan_pccard *card;
+ int err;
+
+ pdev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+
+ err = pcmcia_loop_config(pdev, pcan_conf_check, NULL);
+ if (err) {
+ dev_err(&pdev->dev, "pcmcia_loop_config() error %d\n", err);
+ goto probe_err_1;
+ }
+
+ if (!pdev->irq) {
+ dev_err(&pdev->dev, "no irq assigned\n");
+ err = -ENODEV;
+ goto probe_err_1;
+ }
+
+ err = pcmcia_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pcmcia_enable_device failed err=%d\n",
+ err);
+ goto probe_err_1;
+ }
+
+ card = kzalloc(sizeof(struct pcan_pccard), GFP_KERNEL);
+ if (!card) {
+ dev_err(&pdev->dev, "couldn't allocate card memory\n");
+ err = -ENOMEM;
+ goto probe_err_2;
+ }
+
+ card->pdev = pdev;
+ pdev->priv = card;
+
+ /* sja1000 api uses iomem */
+ card->ioport_addr = ioport_map(pdev->resource[0]->start,
+ resource_size(pdev->resource[0]));
+ if (!card->ioport_addr) {
+ dev_err(&pdev->dev, "couldn't map io port into io memory\n");
+ err = -ENOMEM;
+ goto probe_err_3;
+ }
+ card->fw_major = pcan_read_reg(card, PCC_FW_MAJOR);
+ card->fw_minor = pcan_read_reg(card, PCC_FW_MINOR);
+
+ /* display board name and firmware version */
+ dev_info(&pdev->dev, "PEAK-System pcmcia card %s fw %d.%d\n",
+ pdev->prod_id[1] ? pdev->prod_id[1] : "PCAN-PC Card",
+ card->fw_major, card->fw_minor);
+
+ /* detect available channels */
+ pcan_add_channels(card);
+ if (!card->chan_count)
+ goto probe_err_4;
+
+ /* init the timer which controls the leds */
+ init_timer(&card->led_timer);
+ card->led_timer.function = pcan_led_timer;
+ card->led_timer.data = (unsigned long)card;
+
+ /* request the given irq */
+ err = request_irq(pdev->irq, &pcan_isr, IRQF_SHARED, PCC_NAME, card);
+ if (err) {
+ dev_err(&pdev->dev, "couldn't request irq%d\n", pdev->irq);
+ goto probe_err_5;
+ }
+
+ /* power on the connectors */
+ pcan_set_can_power(card, 1);
+
+ return 0;
+
+probe_err_5:
+ /* unregister can devices from network */
+ pcan_free_channels(card);
+
+probe_err_4:
+ ioport_unmap(card->ioport_addr);
+
+probe_err_3:
+ kfree(card);
+ pdev->priv = NULL;
+
+probe_err_2:
+ pcmcia_disable_device(pdev);
+
+probe_err_1:
+ return err;
+}
+
+/*
+ * release claimed resources
+ */
+static void pcan_remove(struct pcmcia_device *pdev)
+{
+ pcan_free(pdev);
+ pcmcia_disable_device(pdev);
+}
+
+static struct pcmcia_driver pcan_driver = {
+ .name = PCC_NAME,
+ .probe = pcan_probe,
+ .remove = pcan_remove,
+ .id_table = pcan_table,
+};
+
+static int __init pcan_init(void)
+{
+ return pcmcia_register_driver(&pcan_driver);
+}
+module_init(pcan_init);
+
+static void __exit pcan_exit(void)
+{
+ pcmcia_unregister_driver(&pcan_driver);
+}
+module_exit(pcan_exit);
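Two register encodings in the new peak_pcmcia driver are easy to misread: PCC_CCR keeps the per-channel reset bits in bits 3:2 and one 2-bit LED selector per channel in bits 5:4 (channel 0) and 7:6 (channel 1), and the EEPROM read/write opcodes fold address bit A8 into instruction bit 3. A standalone check, repeating the macros from the file above:

#include <assert.h>

#define PCC_CCR_CLK_16		0x00
#define PCC_CCR_RST_CHAN(c)	(0x01 << ((c) + 2))
#define PCC_CCR_RST_ALL		(PCC_CCR_RST_CHAN(0) | PCC_CCR_RST_CHAN(1))
#define PCC_LED_OFF		0x03
#define PCC_CCR_LED_CHAN(s, c)	((s) << (((c) + 2) << 1))
#define PCC_CCR_LED_OFF_ALL	(PCC_CCR_LED_CHAN(PCC_LED_OFF, 0) | \
				 PCC_CCR_LED_CHAN(PCC_LED_OFF, 1))
#define PCC_EEP_WRITE(a)	(0x02 | (((a) & 0x100) >> 5))

int main(void)
{
	assert(PCC_CCR_RST_ALL == 0x0c);	/* reset bits for both channels */
	assert(PCC_CCR_LED_OFF_ALL == 0xf0);	/* both LEDs off */
	assert((PCC_CCR_CLK_16 | PCC_CCR_RST_ALL | PCC_CCR_LED_OFF_ALL) == 0xfc);
	assert(PCC_EEP_WRITE(0x0ff) == 0x02);	/* A8 clear */
	assert(PCC_EEP_WRITE(0x100) == 0x0a);	/* A8 set -> instruction bit 3 */
	return 0;
}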
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index c7f3d4ea1167..a227586ddd52 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -43,7 +43,8 @@ MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
"TEWS TECHNOLOGIES TPMC810, "
"esd CAN-PCI/CPCI/PCI104/200, "
"esd CAN-PCI/PMC/266, "
- "esd CAN-PCIe/2000")
+ "esd CAN-PCIe/2000, "
+ "IXXAT PC-I 04/PCI")
MODULE_LICENSE("GPL v2");
#define PLX_PCI_MAX_CHAN 2
@@ -121,6 +122,10 @@ struct plx_pci_card {
#define ESD_PCI_SUB_SYS_ID_PCIE2000 0x0200
#define ESD_PCI_SUB_SYS_ID_PCI104200 0x0501
+#define IXXAT_PCI_VENDOR_ID 0x10b5
+#define IXXAT_PCI_DEVICE_ID 0x9050
+#define IXXAT_PCI_SUB_SYS_ID 0x2540
+
#define MARATHON_PCI_DEVICE_ID 0x2715
#define TEWS_PCI_VENDOR_ID 0x1498
@@ -193,6 +198,14 @@ static struct plx_pci_card_info plx_pci_card_info_esd2000 __devinitdata = {
/* based on PEX8311 */
};
+static struct plx_pci_card_info plx_pci_card_info_ixxat __devinitdata = {
+ "IXXAT PC-I 04/PCI", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x200, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9050 */
+};
+
static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
"Marathon CAN-bus-PCI", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
@@ -267,6 +280,13 @@ static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
(kernel_ulong_t)&plx_pci_card_info_esd2000
},
{
+ /* IXXAT PC-I 04/PCI card */
+ IXXAT_PCI_VENDOR_ID, IXXAT_PCI_DEVICE_ID,
+ PCI_ANY_ID, IXXAT_PCI_SUB_SYS_ID,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_ixxat
+ },
+ {
/* Marathon CAN-bus-PCI card */
PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 192b0d118df4..5e10472371ed 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -133,7 +133,7 @@ static void set_reset_mode(struct net_device *dev)
status = priv->read_reg(priv, REG_MOD);
}
- dev_err(dev->dev.parent, "setting SJA1000 into reset mode failed!\n");
+ netdev_err(dev, "setting SJA1000 into reset mode failed!\n");
}
static void set_normal_mode(struct net_device *dev)
@@ -161,7 +161,7 @@ static void set_normal_mode(struct net_device *dev)
status = priv->read_reg(priv, REG_MOD);
}
- dev_err(dev->dev.parent, "setting SJA1000 into normal mode failed!\n");
+ netdev_err(dev, "setting SJA1000 into normal mode failed!\n");
}
static void sja1000_start(struct net_device *dev)
@@ -214,8 +214,7 @@ static int sja1000_set_bittiming(struct net_device *dev)
if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
btr1 |= 0x80;
- dev_info(dev->dev.parent,
- "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
+ netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
priv->write_reg(priv, REG_BTR0, btr0);
priv->write_reg(priv, REG_BTR1, btr1);
@@ -383,7 +382,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
if (isrc & IRQ_DOI) {
/* data overrun interrupt */
- dev_dbg(dev->dev.parent, "data overrun interrupt\n");
+ netdev_dbg(dev, "data overrun interrupt\n");
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
stats->rx_over_errors++;
@@ -393,7 +392,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
if (isrc & IRQ_EI) {
/* error warning interrupt */
- dev_dbg(dev->dev.parent, "error warning interrupt\n");
+ netdev_dbg(dev, "error warning interrupt\n");
if (status & SR_BS) {
state = CAN_STATE_BUS_OFF;
@@ -434,7 +433,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
}
if (isrc & IRQ_EPI) {
/* error passive interrupt */
- dev_dbg(dev->dev.parent, "error passive interrupt\n");
+ netdev_dbg(dev, "error passive interrupt\n");
if (status & SR_ES)
state = CAN_STATE_ERROR_PASSIVE;
else
@@ -442,7 +441,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
}
if (isrc & IRQ_ALI) {
/* arbitration lost interrupt */
- dev_dbg(dev->dev.parent, "arbitration lost interrupt\n");
+ netdev_dbg(dev, "arbitration lost interrupt\n");
alc = priv->read_reg(priv, REG_ALC);
priv->can.can_stats.arbitration_lost++;
stats->tx_errors++;
@@ -503,7 +502,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
return IRQ_NONE;
if (isrc & IRQ_WUI)
- dev_warn(dev->dev.parent, "wakeup interrupt\n");
+ netdev_warn(dev, "wakeup interrupt\n");
if (isrc & IRQ_TI) {
/* transmission complete interrupt */
@@ -533,7 +532,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
priv->post_irq(priv);
if (n >= SJA1000_MAX_IRQ)
- dev_dbg(dev->dev.parent, "%d messages handled in ISR", n);
+ netdev_dbg(dev, "%d messages handled in ISR", n);
return (n) ? IRQ_HANDLED : IRQ_NONE;
}
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 3f1ebcc2cb83..98a5a7d867f5 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -1,7 +1,7 @@
/*
* slcan.c - serial line CAN interface driver (using tty line discipline)
*
- * This file is derived from linux/drivers/net/slip.c
+ * This file is derived from linux/drivers/net/slip/slip.c
*
* slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
* Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
@@ -639,10 +639,8 @@ static int __init slcan_init(void)
printk(KERN_INFO "slcan: %d dynamic interface channels.\n", maxdev);
slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL);
- if (!slcan_devs) {
- printk(KERN_ERR "slcan: can't allocate slcan device array!\n");
+ if (!slcan_devs)
return -ENOMEM;
- }
/* Fill in our line protocol discipline, and register it */
status = tty_register_ldisc(N_SLCAN, &slc_ldisc);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 5a2e1e3588a1..4accd7ec6954 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -306,7 +306,7 @@ static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
if (bit_timing->brp > 4)
can_btc |= HECC_CANBTC_SAM;
else
- dev_warn(priv->ndev->dev.parent, "WARN: Triple" \
+ netdev_warn(priv->ndev, "WARN: Triple "
"sampling not set due to h/w limitations");
}
can_btc |= ((bit_timing->sjw - 1) & 0x3) << 8;
@@ -315,7 +315,7 @@ static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
/* ERM being set to 0 by default meaning resync at falling edge */
hecc_write(priv, HECC_CANBTC, can_btc);
- dev_info(priv->ndev->dev.parent, "setting CANBTC=%#x\n", can_btc);
+ netdev_info(priv->ndev, "setting CANBTC=%#x\n", can_btc);
return 0;
}
@@ -332,7 +332,7 @@ static void ti_hecc_reset(struct net_device *ndev)
u32 cnt;
struct ti_hecc_priv *priv = netdev_priv(ndev);
- dev_dbg(ndev->dev.parent, "resetting hecc ...\n");
+ netdev_dbg(ndev, "resetting hecc ...\n");
hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SRES);
/* Set change control request and wait till enabled */
@@ -458,6 +458,17 @@ static int ti_hecc_do_set_mode(struct net_device *ndev, enum can_mode mode)
return ret;
}
+static int ti_hecc_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct ti_hecc_priv *priv = netdev_priv(ndev);
+
+ bec->txerr = hecc_read(priv, HECC_CANTEC);
+ bec->rxerr = hecc_read(priv, HECC_CANREC);
+
+ return 0;
+}
+
/*
* ti_hecc_xmit: HECC Transmit
*
@@ -496,7 +507,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
if (unlikely(hecc_read(priv, HECC_CANME) & mbx_mask)) {
spin_unlock_irqrestore(&priv->mbx_lock, flags);
netif_stop_queue(ndev);
- dev_err(priv->ndev->dev.parent,
+ netdev_err(priv->ndev,
"BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
priv->tx_head, priv->tx_tail);
return NETDEV_TX_BUSY;
@@ -550,7 +561,7 @@ static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
skb = alloc_can_skb(priv->ndev, &cf);
if (!skb) {
if (printk_ratelimit())
- dev_err(priv->ndev->dev.parent,
+ netdev_err(priv->ndev,
"ti_hecc_rx_pkt: alloc_can_skb() failed\n");
return -ENOMEM;
}
@@ -668,7 +679,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
skb = alloc_can_err_skb(ndev, &cf);
if (!skb) {
if (printk_ratelimit())
- dev_err(priv->ndev->dev.parent,
+ netdev_err(priv->ndev,
"ti_hecc_error: alloc_can_err_skb() failed\n");
return -ENOMEM;
}
@@ -684,7 +695,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
}
hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW);
- dev_dbg(priv->ndev->dev.parent, "Error Warning interrupt\n");
+ netdev_dbg(priv->ndev, "Error Warning interrupt\n");
hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
}
@@ -699,7 +710,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
}
hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP);
- dev_dbg(priv->ndev->dev.parent, "Error passive interrupt\n");
+ netdev_dbg(priv->ndev, "Error passive interrupt\n");
hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
}
@@ -825,7 +836,7 @@ static int ti_hecc_open(struct net_device *ndev)
err = request_irq(ndev->irq, ti_hecc_interrupt, IRQF_SHARED,
ndev->name, ndev);
if (err) {
- dev_err(ndev->dev.parent, "error requesting interrupt\n");
+ netdev_err(ndev, "error requesting interrupt\n");
return err;
}
@@ -834,7 +845,7 @@ static int ti_hecc_open(struct net_device *ndev)
/* Open common can device */
err = open_candev(ndev);
if (err) {
- dev_err(ndev->dev.parent, "open_candev() failed %d\n", err);
+ netdev_err(ndev, "open_candev() failed %d\n", err);
ti_hecc_transceiver_switch(priv, 0);
free_irq(ndev->irq, ndev);
return err;
@@ -923,6 +934,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->can.bittiming_const = &ti_hecc_bittiming_const;
priv->can.do_set_mode = ti_hecc_do_set_mode;
priv->can.do_get_state = ti_hecc_get_state;
+ priv->can.do_get_berr_counter = ti_hecc_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
spin_lock_init(&priv->mbx_lock);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 04525495b15b..0a6876841c20 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -13,4 +13,10 @@ config CAN_ESD_USB2
This driver supports the CAN-USB/2 interface
from esd electronic system design gmbh (http://www.esd.eu).
+config CAN_PEAK_USB
+ tristate "PEAK PCAN-USB/USB Pro interfaces"
+ ---help---
+ This driver supports the PCAN-USB and PCAN-USB Pro adapters
+ from PEAK-System Technik (http://www.peak-system.com).
+
endmenu
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index fce3cf11719f..da6d1d3b2969 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -4,5 +4,6 @@
obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
+obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 7dae64d44e83..7ae65fc80032 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -288,8 +288,7 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
return;
default:
- dev_info(netdev->dev.parent, "Rx interrupt aborted %d\n",
- urb->status);
+ netdev_info(netdev, "Rx interrupt aborted %d\n", urb->status);
break;
}
@@ -298,8 +297,7 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
if (err == -ENODEV)
netif_device_detach(netdev);
else if (err)
- dev_err(netdev->dev.parent,
- "failed resubmitting intr urb: %d\n", err);
+ netdev_err(netdev, "failed resubmitting intr urb: %d\n", err);
}
static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
@@ -431,8 +429,7 @@ static void ems_usb_read_bulk_callback(struct urb *urb)
return;
default:
- dev_info(netdev->dev.parent, "Rx URB aborted (%d)\n",
- urb->status);
+ netdev_info(netdev, "Rx URB aborted (%d)\n", urb->status);
goto resubmit_urb;
}
@@ -477,7 +474,7 @@ static void ems_usb_read_bulk_callback(struct urb *urb)
msg_count--;
if (start > urb->transfer_buffer_length) {
- dev_err(netdev->dev.parent, "format error\n");
+ netdev_err(netdev, "format error\n");
break;
}
}
@@ -493,8 +490,8 @@ resubmit_urb:
if (retval == -ENODEV)
netif_device_detach(netdev);
else if (retval)
- dev_err(netdev->dev.parent,
- "failed resubmitting read bulk urb: %d\n", retval);
+ netdev_err(netdev,
+ "failed resubmitting read bulk urb: %d\n", retval);
}
/*
@@ -521,8 +518,7 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
return;
if (urb->status)
- dev_info(netdev->dev.parent, "Tx URB aborted (%d)\n",
- urb->status);
+ netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
netdev->trans_start = jiffies;
@@ -605,18 +601,18 @@ static int ems_usb_start(struct ems_usb *dev)
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
- dev_err(netdev->dev.parent,
- "No memory left for URBs\n");
- return -ENOMEM;
+ netdev_err(netdev, "No memory left for URBs\n");
+ err = -ENOMEM;
+ break;
}
buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
- dev_err(netdev->dev.parent,
- "No memory left for USB buffer\n");
+ netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
- return -ENOMEM;
+ err = -ENOMEM;
+ break;
}
usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
@@ -639,13 +635,13 @@ static int ems_usb_start(struct ems_usb *dev)
/* Did we submit any URBs */
if (i == 0) {
- dev_warn(netdev->dev.parent, "couldn't setup read URBs\n");
+ netdev_warn(netdev, "couldn't setup read URBs\n");
return err;
}
/* Warn if we couldn't submit all the URBs */
if (i < MAX_RX_URBS)
- dev_warn(netdev->dev.parent, "rx performance may be slow\n");
+ netdev_warn(netdev, "rx performance may be slow\n");
/* Setup and start interrupt URB */
usb_fill_int_urb(dev->intr_urb, dev->udev,
@@ -656,8 +652,7 @@ static int ems_usb_start(struct ems_usb *dev)
err = usb_submit_urb(dev->intr_urb, GFP_KERNEL);
if (err) {
- dev_warn(netdev->dev.parent, "intr URB submit failed: %d\n",
- err);
+ netdev_warn(netdev, "intr URB submit failed: %d\n", err);
return err;
}
@@ -686,7 +681,7 @@ static int ems_usb_start(struct ems_usb *dev)
return 0;
failed:
- dev_warn(netdev->dev.parent, "couldn't submit control: %d\n", err);
+ netdev_warn(netdev, "couldn't submit control: %d\n", err);
return err;
}
@@ -726,8 +721,7 @@ static int ems_usb_open(struct net_device *netdev)
if (err == -ENODEV)
netif_device_detach(dev->netdev);
- dev_warn(netdev->dev.parent, "couldn't start device: %d\n",
- err);
+ netdev_warn(netdev, "couldn't start device: %d\n", err);
close_candev(netdev);
@@ -760,13 +754,13 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
/* create a URB, and a buffer for it, and copy the data to the URB */
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
- dev_err(netdev->dev.parent, "No memory left for URBs\n");
+ netdev_err(netdev, "No memory left for URBs\n");
goto nomem;
}
buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma);
if (!buf) {
- dev_err(netdev->dev.parent, "No memory left for USB buffer\n");
+ netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
goto nomem;
}
@@ -809,7 +803,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
- dev_warn(netdev->dev.parent, "couldn't find free context\n");
+ netdev_warn(netdev, "couldn't find free context\n");
return NETDEV_TX_BUSY;
}
@@ -840,7 +834,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
if (err == -ENODEV) {
netif_device_detach(netdev);
} else {
- dev_warn(netdev->dev.parent, "failed tx_urb %d\n", err);
+ netdev_warn(netdev, "failed tx_urb %d\n", err);
stats->tx_dropped++;
}
@@ -880,7 +874,7 @@ static int ems_usb_close(struct net_device *netdev)
/* Set CAN controller to reset mode */
if (ems_usb_write_mode(dev, SJA1000_MOD_RM))
- dev_warn(netdev->dev.parent, "couldn't stop device");
+ netdev_warn(netdev, "couldn't stop device");
close_candev(netdev);
@@ -917,7 +911,7 @@ static int ems_usb_set_mode(struct net_device *netdev, enum can_mode mode)
switch (mode) {
case CAN_MODE_START:
if (ems_usb_write_mode(dev, SJA1000_MOD_NORMAL))
- dev_warn(netdev->dev.parent, "couldn't start device");
+ netdev_warn(netdev, "couldn't start device");
if (netif_queue_stopped(netdev))
netif_wake_queue(netdev);
@@ -942,8 +936,7 @@ static int ems_usb_set_bittiming(struct net_device *netdev)
if (dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
btr1 |= 0x80;
- dev_info(netdev->dev.parent, "setting BTR0=0x%02x BTR1=0x%02x\n",
- btr0, btr1);
+ netdev_info(netdev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
dev->active_params.msg.can_params.cc_params.sja1000.btr0 = btr0;
dev->active_params.msg.can_params.cc_params.sja1000.btr1 = btr1;
@@ -1048,15 +1041,13 @@ static int ems_usb_probe(struct usb_interface *intf,
err = ems_usb_command_msg(dev, &dev->active_params);
if (err) {
- dev_err(netdev->dev.parent,
- "couldn't initialize controller: %d\n", err);
+ netdev_err(netdev, "couldn't initialize controller: %d\n", err);
goto cleanup_tx_msg_buffer;
}
err = register_candev(netdev);
if (err) {
- dev_err(netdev->dev.parent,
- "couldn't register CAN device: %d\n", err);
+ netdev_err(netdev, "couldn't register CAN device: %d\n", err);
goto cleanup_tx_msg_buffer;
}
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 92774637aad8..09b1da5bc512 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -470,8 +470,7 @@ static void esd_usb2_write_bulk_callback(struct urb *urb)
return;
if (urb->status)
- dev_info(netdev->dev.parent, "Tx URB aborted (%d)\n",
- urb->status);
+ netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
netdev->trans_start = jiffies;
}
@@ -651,7 +650,7 @@ failed:
if (err == -ENODEV)
netif_device_detach(netdev);
- dev_err(netdev->dev.parent, "couldn't start device: %d\n", err);
+ netdev_err(netdev, "couldn't start device: %d\n", err);
return err;
}
@@ -687,8 +686,7 @@ static int esd_usb2_open(struct net_device *netdev)
/* finally start device */
err = esd_usb2_start(priv);
if (err) {
- dev_warn(netdev->dev.parent,
- "couldn't start device: %d\n", err);
+ netdev_warn(netdev, "couldn't start device: %d\n", err);
close_candev(netdev);
return err;
}
@@ -721,7 +719,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
/* create a URB, and a buffer for it, and copy the data to the URB */
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
- dev_err(netdev->dev.parent, "No memory left for URBs\n");
+ netdev_err(netdev, "No memory left for URBs\n");
stats->tx_dropped++;
dev_kfree_skb(skb);
goto nourbmem;
@@ -730,7 +728,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC,
&urb->transfer_dma);
if (!buf) {
- dev_err(netdev->dev.parent, "No memory left for USB buffer\n");
+ netdev_err(netdev, "No memory left for USB buffer\n");
stats->tx_dropped++;
dev_kfree_skb(skb);
goto nobufmem;
@@ -766,7 +764,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
* This may never happen.
*/
if (!context) {
- dev_warn(netdev->dev.parent, "couldn't find free context\n");
+ netdev_warn(netdev, "couldn't find free context\n");
ret = NETDEV_TX_BUSY;
goto releasebuf;
}
@@ -806,7 +804,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
if (err == -ENODEV)
netif_device_detach(netdev);
else
- dev_warn(netdev->dev.parent, "failed tx_urb %d\n", err);
+ netdev_warn(netdev, "failed tx_urb %d\n", err);
goto releasebuf;
}
@@ -845,7 +843,7 @@ static int esd_usb2_close(struct net_device *netdev)
for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++)
msg.msg.filter.mask[i] = 0;
if (esd_usb2_send_msg(priv->usb2, &msg) < 0)
- dev_err(netdev->dev.parent, "sending idadd message failed\n");
+ netdev_err(netdev, "sending idadd message failed\n");
/* set CAN controller to reset mode */
msg.msg.hdr.len = 2;
@@ -854,7 +852,7 @@ static int esd_usb2_close(struct net_device *netdev)
msg.msg.setbaud.rsvd = 0;
msg.msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE);
if (esd_usb2_send_msg(priv->usb2, &msg) < 0)
- dev_err(netdev->dev.parent, "sending setbaud message failed\n");
+ netdev_err(netdev, "sending setbaud message failed\n");
priv->can.state = CAN_STATE_STOPPED;
@@ -910,7 +908,7 @@ static int esd_usb2_set_bittiming(struct net_device *netdev)
msg.msg.setbaud.rsvd = 0;
msg.msg.setbaud.baud = cpu_to_le32(canbtr);
- dev_info(netdev->dev.parent, "setting BTR=%#x\n", canbtr);
+ netdev_info(netdev, "setting BTR=%#x\n", canbtr);
return esd_usb2_send_msg(priv->usb2, &msg);
}
@@ -988,15 +986,14 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
err = register_candev(netdev);
if (err) {
- dev_err(&intf->dev,
- "couldn't register CAN device: %d\n", err);
+ dev_err(&intf->dev, "couldn't register CAN device: %d\n", err);
free_candev(netdev);
err = -ENOMEM;
goto done;
}
dev->nets[index] = priv;
- dev_info(netdev->dev.parent, "device %s registered\n", netdev->name);
+ netdev_info(netdev, "device %s registered\n", netdev->name);
done:
return err;
diff --git a/drivers/net/can/usb/peak_usb/Makefile b/drivers/net/can/usb/peak_usb/Makefile
new file mode 100644
index 000000000000..1aefbc88d643
--- /dev/null
+++ b/drivers/net/can/usb/peak_usb/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CAN_PEAK_USB) += peak_usb.o
+peak_usb-y = pcan_usb_core.o pcan_usb.o pcan_usb_pro.o
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
new file mode 100644
index 000000000000..86f26a1ede4c
--- /dev/null
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -0,0 +1,899 @@
+/*
+ * CAN driver for PEAK System PCAN-USB adapter
+ * Derived from the PCAN project file driver/src/pcan_usb.c
+ *
+ * Copyright (C) 2003-2010 PEAK System-Technik GmbH
+ * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ *
+ * Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+#include <linux/module.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include "pcan_usb_core.h"
+
+MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter");
+
+/* PCAN-USB Endpoints */
+#define PCAN_USB_EP_CMDOUT 1
+#define PCAN_USB_EP_CMDIN (PCAN_USB_EP_CMDOUT | USB_DIR_IN)
+#define PCAN_USB_EP_MSGOUT 2
+#define PCAN_USB_EP_MSGIN (PCAN_USB_EP_MSGOUT | USB_DIR_IN)
+
+/* PCAN-USB command struct */
+#define PCAN_USB_CMD_FUNC 0
+#define PCAN_USB_CMD_NUM 1
+#define PCAN_USB_CMD_ARGS 2
+#define PCAN_USB_CMD_ARGS_LEN 14
+#define PCAN_USB_CMD_LEN (PCAN_USB_CMD_ARGS + \
+ PCAN_USB_CMD_ARGS_LEN)
+
+/* PCAN-USB command timeout (ms.) */
+#define PCAN_USB_COMMAND_TIMEOUT 1000
+
+/* PCAN-USB startup timeout (ms.) */
+#define PCAN_USB_STARTUP_TIMEOUT 10
+
+/* PCAN-USB rx/tx buffers size */
+#define PCAN_USB_RX_BUFFER_SIZE 64
+#define PCAN_USB_TX_BUFFER_SIZE 64
+
+#define PCAN_USB_MSG_HEADER_LEN 2
+
+/* PCAN-USB adapter internal clock (Hz) */
+#define PCAN_USB_CRYSTAL_HZ 16000000
+
+/* PCAN-USB USB message record status/len field */
+#define PCAN_USB_STATUSLEN_TIMESTAMP (1 << 7)
+#define PCAN_USB_STATUSLEN_INTERNAL (1 << 6)
+#define PCAN_USB_STATUSLEN_EXT_ID (1 << 5)
+#define PCAN_USB_STATUSLEN_RTR (1 << 4)
+#define PCAN_USB_STATUSLEN_DLC (0xf)
+
+/* PCAN-USB error flags */
+#define PCAN_USB_ERROR_TXFULL 0x01
+#define PCAN_USB_ERROR_RXQOVR 0x02
+#define PCAN_USB_ERROR_BUS_LIGHT 0x04
+#define PCAN_USB_ERROR_BUS_HEAVY 0x08
+#define PCAN_USB_ERROR_BUS_OFF 0x10
+#define PCAN_USB_ERROR_RXQEMPTY 0x20
+#define PCAN_USB_ERROR_QOVR 0x40
+#define PCAN_USB_ERROR_TXQFULL 0x80
+
+/* SJA1000 modes */
+#define SJA1000_MODE_NORMAL 0x00
+#define SJA1000_MODE_INIT 0x01
+
+/*
+ * tick duration = 42.666 us =>
+ * (tick_number * 44739243) >> 20 ~ (tick_number * 42666) / 1000
+ * accuracy = 10^-7
+ */
+#define PCAN_USB_TS_DIV_SHIFTER 20
+#define PCAN_USB_TS_US_PER_TICK 44739243
+
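The two macros above implement the tick-to-microsecond conversion as a multiply-and-shift: 44739243 / 2^20 is roughly 42.666667, so the shifted product tracks the 42.666 us tick quoted in the comment while avoiding a division. A small stand-alone check of that arithmetic, with an arbitrarily chosen tick count:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t ticks = 60000;			/* arbitrary sample value */

		/* driver formula: multiply by 44739243, then shift right by 20 */
		uint64_t us_shift = (ticks * 44739243ULL) >> 20;

		/* reference from the comment above: (ticks * 42666) / 1000 */
		uint64_t us_div = (ticks * 42666ULL) / 1000ULL;

		printf("shift=%llu us, div=%llu us\n",
		       (unsigned long long)us_shift, (unsigned long long)us_div);
		return 0;
	}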
+/* PCAN-USB messages record types */
+#define PCAN_USB_REC_ERROR 1
+#define PCAN_USB_REC_ANALOG 2
+#define PCAN_USB_REC_BUSLOAD 3
+#define PCAN_USB_REC_TS 4
+#define PCAN_USB_REC_BUSEVT 5
+
+/* private to PCAN-USB adapter */
+struct pcan_usb {
+ struct peak_usb_device dev;
+ struct peak_time_ref time_ref;
+ struct timer_list restart_timer;
+};
+
+/* incoming message context for decoding */
+struct pcan_usb_msg_context {
+ u16 ts16;
+ u8 prev_ts8;
+ u8 *ptr;
+ u8 *end;
+ u8 rec_cnt;
+ u8 rec_idx;
+ u8 rec_data_idx;
+ struct net_device *netdev;
+ struct pcan_usb *pdev;
+};
+
+/*
+ * send a command
+ */
+static int pcan_usb_send_cmd(struct peak_usb_device *dev, u8 f, u8 n, u8 *p)
+{
+ int err;
+ int actual_length;
+
+ /* usb device unregistered? */
+ if (!(dev->state & PCAN_USB_STATE_CONNECTED))
+ return 0;
+
+ dev->cmd_buf[PCAN_USB_CMD_FUNC] = f;
+ dev->cmd_buf[PCAN_USB_CMD_NUM] = n;
+
+ if (p)
+ memcpy(dev->cmd_buf + PCAN_USB_CMD_ARGS,
+ p, PCAN_USB_CMD_ARGS_LEN);
+
+ err = usb_bulk_msg(dev->udev,
+ usb_sndbulkpipe(dev->udev, PCAN_USB_EP_CMDOUT),
+ dev->cmd_buf, PCAN_USB_CMD_LEN, &actual_length,
+ PCAN_USB_COMMAND_TIMEOUT);
+ if (err)
+ netdev_err(dev->netdev,
+ "sending cmd f=0x%x n=0x%x failure: %d\n",
+ f, n, err);
+ return err;
+}
+
+/*
+ * send a command then wait for its response
+ */
+static int pcan_usb_wait_rsp(struct peak_usb_device *dev, u8 f, u8 n, u8 *p)
+{
+ int err;
+ int actual_length;
+
+ /* usb device unregistered? */
+ if (!(dev->state & PCAN_USB_STATE_CONNECTED))
+ return 0;
+
+ /* first, send command */
+ err = pcan_usb_send_cmd(dev, f, n, NULL);
+ if (err)
+ return err;
+
+ err = usb_bulk_msg(dev->udev,
+ usb_rcvbulkpipe(dev->udev, PCAN_USB_EP_CMDIN),
+ dev->cmd_buf, PCAN_USB_CMD_LEN, &actual_length,
+ PCAN_USB_COMMAND_TIMEOUT);
+ if (err)
+ netdev_err(dev->netdev,
+ "waiting rsp f=0x%x n=0x%x failure: %d\n", f, n, err);
+ else if (p)
+ memcpy(p, dev->cmd_buf + PCAN_USB_CMD_ARGS,
+ PCAN_USB_CMD_ARGS_LEN);
+
+ return err;
+}
+
+static int pcan_usb_set_sja1000(struct peak_usb_device *dev, u8 mode)
+{
+ u8 args[PCAN_USB_CMD_ARGS_LEN] = {
+ [1] = mode,
+ };
+
+ return pcan_usb_send_cmd(dev, 9, 2, args);
+}
+
+static int pcan_usb_set_bus(struct peak_usb_device *dev, u8 onoff)
+{
+ u8 args[PCAN_USB_CMD_ARGS_LEN] = {
+ [0] = !!onoff,
+ };
+
+ return pcan_usb_send_cmd(dev, 3, 2, args);
+}
+
+static int pcan_usb_set_silent(struct peak_usb_device *dev, u8 onoff)
+{
+ u8 args[PCAN_USB_CMD_ARGS_LEN] = {
+ [0] = !!onoff,
+ };
+
+ return pcan_usb_send_cmd(dev, 3, 3, args);
+}
+
+static int pcan_usb_set_ext_vcc(struct peak_usb_device *dev, u8 onoff)
+{
+ u8 args[PCAN_USB_CMD_ARGS_LEN] = {
+ [0] = !!onoff,
+ };
+
+ return pcan_usb_send_cmd(dev, 10, 2, args);
+}
+
+/*
+ * set the CAN controller bit timing
+ */
+static int pcan_usb_set_bittiming(struct peak_usb_device *dev,
+ struct can_bittiming *bt)
+{
+ u8 args[PCAN_USB_CMD_ARGS_LEN];
+ u8 btr0, btr1;
+
+ btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+ btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+ (((bt->phase_seg2 - 1) & 0x7) << 4);
+ if (dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ btr1 |= 0x80;
+
+ netdev_info(dev->netdev, "setting BTR0=0x%02x BTR1=0x%02x\n",
+ btr0, btr1);
+
+ args[0] = btr1;
+ args[1] = btr0;
+
+ return pcan_usb_send_cmd(dev, 1, 2, args);
+}
+
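Worked example of the BTR0/BTR1 packing done above, using hypothetical bit-timing parameters (not values taken from this patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical bit-timing parameters */
		unsigned brp = 4, sjw = 1, prop_seg = 5, phase_seg1 = 6, phase_seg2 = 4;

		uint8_t btr0 = ((brp - 1) & 0x3f) | (((sjw - 1) & 0x3) << 6);
		uint8_t btr1 = ((prop_seg + phase_seg1 - 1) & 0xf) |
			       (((phase_seg2 - 1) & 0x7) << 4);

		/* prints "BTR0=0x03 BTR1=0x3a" for the values above */
		printf("BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
		return 0;
	}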
+/*
+ * init/reset can
+ */
+static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
+{
+ int err;
+
+ err = pcan_usb_set_bus(dev, onoff);
+ if (err)
+ return err;
+
+ if (!onoff) {
+ err = pcan_usb_set_sja1000(dev, SJA1000_MODE_INIT);
+ } else {
+ /* the PCAN-USB needs time to init */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
+ }
+
+ return err;
+}
+
+/*
+ * handle end of waiting for the device to reset
+ */
+static void pcan_usb_restart(unsigned long arg)
+{
+ /* notify candev and netdev */
+ peak_usb_restart_complete((struct peak_usb_device *)arg);
+}
+
+/*
+ * completion callback for the restart command urb
+ */
+static void pcan_usb_restart_pending(struct urb *urb)
+{
+ struct pcan_usb *pdev = urb->context;
+
+ /* the PCAN-USB needs time to restart */
+ mod_timer(&pdev->restart_timer,
+ jiffies + msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
+
+ /* can delete usb resources */
+ peak_usb_async_complete(urb);
+}
+
+/*
+ * handle asynchronous restart
+ */
+static int pcan_usb_restart_async(struct peak_usb_device *dev, struct urb *urb,
+ u8 *buf)
+{
+ struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev);
+
+ if (timer_pending(&pdev->restart_timer))
+ return -EBUSY;
+
+ /* set bus on */
+ buf[PCAN_USB_CMD_FUNC] = 3;
+ buf[PCAN_USB_CMD_NUM] = 2;
+ buf[PCAN_USB_CMD_ARGS] = 1;
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_sndbulkpipe(dev->udev, PCAN_USB_EP_CMDOUT),
+ buf, PCAN_USB_CMD_LEN,
+ pcan_usb_restart_pending, pdev);
+
+ return usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+/*
+ * read serial number from device
+ */
+static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number)
+{
+ u8 args[PCAN_USB_CMD_ARGS_LEN];
+ int err;
+
+ err = pcan_usb_wait_rsp(dev, 6, 1, args);
+ if (err) {
+ netdev_err(dev->netdev, "getting serial failure: %d\n", err);
+ } else if (serial_number) {
+ u32 tmp32;
+
+ memcpy(&tmp32, args, 4);
+ *serial_number = le32_to_cpu(tmp32);
+ }
+
+ return err;
+}
+
+/*
+ * read device id from device
+ */
+static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id)
+{
+ u8 args[PCAN_USB_CMD_ARGS_LEN];
+ int err;
+
+ err = pcan_usb_wait_rsp(dev, 4, 1, args);
+ if (err)
+ netdev_err(dev->netdev, "getting device id failure: %d\n", err);
+ else if (device_id)
+ *device_id = args[0];
+
+ return err;
+}
+
+/*
+ * update current time ref with received timestamp
+ */
+static int pcan_usb_update_ts(struct pcan_usb_msg_context *mc)
+{
+ u16 tmp16;
+
+ if ((mc->ptr+2) > mc->end)
+ return -EINVAL;
+
+ memcpy(&tmp16, mc->ptr, 2);
+
+ mc->ts16 = le16_to_cpu(tmp16);
+
+ if (mc->rec_idx > 0)
+ peak_usb_update_ts_now(&mc->pdev->time_ref, mc->ts16);
+ else
+ peak_usb_set_ts_now(&mc->pdev->time_ref, mc->ts16);
+
+ return 0;
+}
+
+/*
+ * decode received timestamp
+ */
+static int pcan_usb_decode_ts(struct pcan_usb_msg_context *mc, u8 first_packet)
+{
+ /* only 1st packet supplies a word timestamp */
+ if (first_packet) {
+ u16 tmp16;
+
+ if ((mc->ptr + 2) > mc->end)
+ return -EINVAL;
+
+ memcpy(&tmp16, mc->ptr, 2);
+ mc->ptr += 2;
+
+ mc->ts16 = le16_to_cpu(tmp16);
+ mc->prev_ts8 = mc->ts16 & 0x00ff;
+ } else {
+ u8 ts8;
+
+ if ((mc->ptr + 1) > mc->end)
+ return -EINVAL;
+
+ ts8 = *mc->ptr++;
+
+ if (ts8 < mc->prev_ts8)
+ mc->ts16 += 0x100;
+
+ mc->ts16 &= 0xff00;
+ mc->ts16 |= ts8;
+ mc->prev_ts8 = ts8;
+ }
+
+ return 0;
+}
+
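In the non-first-packet branch above, only the low timestamp byte is transmitted; a value smaller than the previous one means that byte wrapped, so the 16-bit timestamp is bumped by 0x100 before its low byte is replaced. A stand-alone illustration with hypothetical sample values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t ts16 = 0x12fe;		/* last reconstructed 16-bit timestamp */
		uint8_t prev_ts8 = 0xfe;	/* its low byte */
		uint8_t ts8 = 0x05;		/* new low byte, wrapped past 0xff */

		if (ts8 < prev_ts8)
			ts16 += 0x100;		/* account for the wrap */
		ts16 = (ts16 & 0xff00) | ts8;

		printf("ts16=0x%04x\n", ts16);	/* prints "ts16=0x1305" */
		return 0;
	}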
+static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
+ u8 status_len)
+{
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ struct timeval tv;
+ enum can_state new_state;
+
+ /* ignore this error until 1st ts received */
+ if (n == PCAN_USB_ERROR_QOVR)
+ if (!mc->pdev->time_ref.tick_count)
+ return 0;
+
+ new_state = mc->pdev->dev.can.state;
+
+ switch (mc->pdev->dev.can.state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ if (n & PCAN_USB_ERROR_BUS_LIGHT) {
+ new_state = CAN_STATE_ERROR_WARNING;
+ break;
+ }
+
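+ /* fall through */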
+ case CAN_STATE_ERROR_WARNING:
+ if (n & PCAN_USB_ERROR_BUS_HEAVY) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ break;
+ }
+ if (n & PCAN_USB_ERROR_BUS_OFF) {
+ new_state = CAN_STATE_BUS_OFF;
+ break;
+ }
+ if (n & (PCAN_USB_ERROR_RXQOVR | PCAN_USB_ERROR_QOVR)) {
+ /*
+ * trick to bypass next comparison and process other
+ * errors
+ */
+ new_state = CAN_STATE_MAX;
+ break;
+ }
+ if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) {
+ /* no error (back to active state) */
+ mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE;
+ return 0;
+ }
+ break;
+
+ case CAN_STATE_ERROR_PASSIVE:
+ if (n & PCAN_USB_ERROR_BUS_OFF) {
+ new_state = CAN_STATE_BUS_OFF;
+ break;
+ }
+ if (n & PCAN_USB_ERROR_BUS_LIGHT) {
+ new_state = CAN_STATE_ERROR_WARNING;
+ break;
+ }
+ if (n & (PCAN_USB_ERROR_RXQOVR | PCAN_USB_ERROR_QOVR)) {
+ /*
+ * trick to bypass next comparison and process other
+ * errors
+ */
+ new_state = CAN_STATE_MAX;
+ break;
+ }
+
+ if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) {
+ /* no error (back to active state) */
+ mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE;
+ return 0;
+ }
+ break;
+
+ default:
+ /* do nothing waiting for restart */
+ return 0;
+ }
+
+ /* do not post any error if the current state didn't change */
+ if (mc->pdev->dev.can.state == new_state)
+ return 0;
+
+ /* allocate an skb to store the error frame */
+ skb = alloc_can_err_skb(mc->netdev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ switch (new_state) {
+ case CAN_STATE_BUS_OFF:
+ cf->can_id |= CAN_ERR_BUSOFF;
+ can_bus_off(mc->netdev);
+ break;
+
+ case CAN_STATE_ERROR_PASSIVE:
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE |
+ CAN_ERR_CRTL_RX_PASSIVE;
+ mc->pdev->dev.can.can_stats.error_passive++;
+ break;
+
+ case CAN_STATE_ERROR_WARNING:
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_TX_WARNING |
+ CAN_ERR_CRTL_RX_WARNING;
+ mc->pdev->dev.can.can_stats.error_warning++;
+ break;
+
+ default:
+ /* CAN_STATE_MAX (trick to handle other errors) */
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ mc->netdev->stats.rx_over_errors++;
+ mc->netdev->stats.rx_errors++;
+
+ new_state = mc->pdev->dev.can.state;
+ break;
+ }
+
+ mc->pdev->dev.can.state = new_state;
+
+ if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
+ peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
+ skb->tstamp = timeval_to_ktime(tv);
+ }
+
+ netif_rx(skb);
+ mc->netdev->stats.rx_packets++;
+ mc->netdev->stats.rx_bytes += cf->can_dlc;
+
+ return 0;
+}
+
+/*
+ * decode non-data usb message
+ */
+static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc,
+ u8 status_len)
+{
+ u8 rec_len = status_len & PCAN_USB_STATUSLEN_DLC;
+ u8 f, n;
+ int err;
+
+ /* check whether function and number can be read */
+ if ((mc->ptr + 2) > mc->end)
+ return -EINVAL;
+
+ f = mc->ptr[PCAN_USB_CMD_FUNC];
+ n = mc->ptr[PCAN_USB_CMD_NUM];
+ mc->ptr += PCAN_USB_CMD_ARGS;
+
+ if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
+ int err = pcan_usb_decode_ts(mc, !mc->rec_idx);
+
+ if (err)
+ return err;
+ }
+
+ switch (f) {
+ case PCAN_USB_REC_ERROR:
+ err = pcan_usb_decode_error(mc, n, status_len);
+ if (err)
+ return err;
+ break;
+
+ case PCAN_USB_REC_ANALOG:
+ /* analog values (ignored) */
+ rec_len = 2;
+ break;
+
+ case PCAN_USB_REC_BUSLOAD:
+ /* bus load (ignored) */
+ rec_len = 1;
+ break;
+
+ case PCAN_USB_REC_TS:
+ /* only timestamp */
+ if (pcan_usb_update_ts(mc))
+ return -EINVAL;
+ break;
+
+ case PCAN_USB_REC_BUSEVT:
+ /* error frame/bus event */
+ if (n & PCAN_USB_ERROR_TXQFULL)
+ netdev_dbg(mc->netdev, "device Tx queue full\n");
+ break;
+ default:
+ netdev_err(mc->netdev, "unexpected function %u\n", f);
+ break;
+ }
+
+ if ((mc->ptr + rec_len) > mc->end)
+ return -EINVAL;
+
+ mc->ptr += rec_len;
+
+ return 0;
+}
+
+/*
+ * decode data usb message
+ */
+static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
+{
+ u8 rec_len = status_len & PCAN_USB_STATUSLEN_DLC;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ struct timeval tv;
+
+ skb = alloc_can_skb(mc->netdev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ if (status_len & PCAN_USB_STATUSLEN_EXT_ID) {
+ u32 tmp32;
+
+ if ((mc->ptr + 4) > mc->end)
+ goto decode_failed;
+
+ memcpy(&tmp32, mc->ptr, 4);
+ mc->ptr += 4;
+
+ cf->can_id = (le32_to_cpu(tmp32) >> 3) | CAN_EFF_FLAG;
+ } else {
+ u16 tmp16;
+
+ if ((mc->ptr + 2) > mc->end)
+ goto decode_failed;
+
+ memcpy(&tmp16, mc->ptr, 2);
+ mc->ptr += 2;
+
+ cf->can_id = le16_to_cpu(tmp16) >> 5;
+ }
+
+ cf->can_dlc = get_can_dlc(rec_len);
+
+ /* first data packet timestamp is a word */
+ if (pcan_usb_decode_ts(mc, !mc->rec_data_idx))
+ goto decode_failed;
+
+ /* read data */
+ memset(cf->data, 0x0, sizeof(cf->data));
+ if (status_len & PCAN_USB_STATUSLEN_RTR) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ if ((mc->ptr + rec_len) > mc->end)
+ goto decode_failed;
+
+ memcpy(cf->data, mc->ptr, rec_len);
+ mc->ptr += rec_len;
+ }
+
+ /* convert timestamp into kernel time */
+ peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
+ skb->tstamp = timeval_to_ktime(tv);
+
+ /* push the skb */
+ netif_rx(skb);
+
+ /* update statistics */
+ mc->netdev->stats.rx_packets++;
+ mc->netdev->stats.rx_bytes += cf->can_dlc;
+
+ return 0;
+
+decode_failed:
+ dev_kfree_skb(skb);
+ return -EINVAL;
+}
+
+/*
+ * process incoming message
+ */
+static int pcan_usb_decode_msg(struct peak_usb_device *dev, u8 *ibuf, u32 lbuf)
+{
+ struct pcan_usb_msg_context mc = {
+ .rec_cnt = ibuf[1],
+ .ptr = ibuf + PCAN_USB_MSG_HEADER_LEN,
+ .end = ibuf + lbuf,
+ .netdev = dev->netdev,
+ .pdev = container_of(dev, struct pcan_usb, dev),
+ };
+ int err;
+
+ for (err = 0; mc.rec_idx < mc.rec_cnt && !err; mc.rec_idx++) {
+ u8 sl = *mc.ptr++;
+
+ /* handle status and error frames here */
+ if (sl & PCAN_USB_STATUSLEN_INTERNAL) {
+ err = pcan_usb_decode_status(&mc, sl);
+ /* handle normal can frames here */
+ } else {
+ err = pcan_usb_decode_data(&mc, sl);
+ mc.rec_data_idx++;
+ }
+ }
+
+ return err;
+}
+
+/*
+ * process any incoming buffer
+ */
+static int pcan_usb_decode_buf(struct peak_usb_device *dev, struct urb *urb)
+{
+ int err = 0;
+
+ if (urb->actual_length > PCAN_USB_MSG_HEADER_LEN) {
+ err = pcan_usb_decode_msg(dev, urb->transfer_buffer,
+ urb->actual_length);
+
+ } else if (urb->actual_length > 0) {
+ netdev_err(dev->netdev, "usb message length error (%u)\n",
+ urb->actual_length);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+/*
+ * process outgoing packet
+ */
+static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb,
+ u8 *obuf, size_t *size)
+{
+ struct net_device *netdev = dev->netdev;
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u8 *pc;
+
+ obuf[0] = 2;
+ obuf[1] = 1;
+
+ pc = obuf + PCAN_USB_MSG_HEADER_LEN;
+
+ /* status/len byte */
+ *pc = cf->can_dlc;
+ if (cf->can_id & CAN_RTR_FLAG)
+ *pc |= PCAN_USB_STATUSLEN_RTR;
+
+ /* can id */
+ if (cf->can_id & CAN_EFF_FLAG) {
+ __le32 tmp32 = cpu_to_le32((cf->can_id & CAN_ERR_MASK) << 3);
+
+ *pc |= PCAN_USB_STATUSLEN_EXT_ID;
+ memcpy(++pc, &tmp32, 4);
+ pc += 4;
+ } else {
+ __le16 tmp16 = cpu_to_le16((cf->can_id & CAN_ERR_MASK) << 5);
+
+ memcpy(++pc, &tmp16, 2);
+ pc += 2;
+ }
+
+ /* can data */
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ memcpy(pc, cf->data, cf->can_dlc);
+ pc += cf->can_dlc;
+ }
+
+ obuf[(*size)-1] = (u8)(stats->tx_packets & 0xff);
+
+ return 0;
+}
+
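The encoder above stores the CAN identifier left-shifted (by 5 for standard frames, by 3 for extended frames) in little-endian byte order on the wire. A small sketch of the standard-frame case with a hypothetical identifier:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t id = 0x123;			/* 11-bit standard identifier */
		uint16_t wire = (uint16_t)(id << 5);	/* 0x2460 */
		uint8_t buf[2] = { wire & 0xff, wire >> 8 };	/* little-endian bytes */

		/* prints "wire=0x2460 bytes=60 24" */
		printf("wire=0x%04x bytes=%02x %02x\n", wire, buf[0], buf[1]);
		return 0;
	}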
+/*
+ * start interface
+ */
+static int pcan_usb_start(struct peak_usb_device *dev)
+{
+ struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev);
+
+ /* number of bits used in timestamps read from adapter struct */
+ peak_usb_init_time_ref(&pdev->time_ref, &pcan_usb);
+
+ /* if revision greater than 3, can put silent mode on/off */
+ if (dev->device_rev > 3) {
+ int err;
+
+ err = pcan_usb_set_silent(dev,
+ dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY);
+ if (err)
+ return err;
+ }
+
+ return pcan_usb_set_ext_vcc(dev, 0);
+}
+
+static int pcan_usb_init(struct peak_usb_device *dev)
+{
+ struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev);
+ u32 serial_number;
+ int err;
+
+ /* initialize a timer needed to wait for hardware restart */
+ init_timer(&pdev->restart_timer);
+ pdev->restart_timer.function = pcan_usb_restart;
+ pdev->restart_timer.data = (unsigned long)dev;
+
+ /*
+ * explicit use of dev_xxx() instead of netdev_xxx() here:
+ * the information displayed relates to the device itself, not
+ * to the canx netdevice.
+ */
+ err = pcan_usb_get_serial(dev, &serial_number);
+ if (err) {
+ dev_err(dev->netdev->dev.parent,
+ "unable to read %s serial number (err %d)\n",
+ pcan_usb.name, err);
+ return err;
+ }
+
+ dev_info(dev->netdev->dev.parent,
+ "PEAK-System %s adapter hwrev %u serial %08X (%u channel)\n",
+ pcan_usb.name, dev->device_rev, serial_number,
+ pcan_usb.ctrl_count);
+
+ return 0;
+}
+
+/*
+ * probe function for new PCAN-USB usb interface
+ */
+static int pcan_usb_probe(struct usb_interface *intf)
+{
+ struct usb_host_interface *if_desc;
+ int i;
+
+ if_desc = intf->altsetting;
+
+ /* check interface endpoint addresses */
+ for (i = 0; i < if_desc->desc.bNumEndpoints; i++) {
+ struct usb_endpoint_descriptor *ep = &if_desc->endpoint[i].desc;
+
+ switch (ep->bEndpointAddress) {
+ case PCAN_USB_EP_CMDOUT:
+ case PCAN_USB_EP_CMDIN:
+ case PCAN_USB_EP_MSGOUT:
+ case PCAN_USB_EP_MSGIN:
+ break;
+ default:
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * describe the PCAN-USB adapter
+ */
+struct peak_usb_adapter pcan_usb = {
+ .name = "PCAN-USB",
+ .device_id = PCAN_USB_PRODUCT_ID,
+ .ctrl_count = 1,
+ .clock = {
+ .freq = PCAN_USB_CRYSTAL_HZ / 2,
+ },
+ .bittiming_const = {
+ .name = "pcan_usb",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+ },
+
+ /* size of device private data */
+ .sizeof_dev_private = sizeof(struct pcan_usb),
+
+ /* timestamps usage */
+ .ts_used_bits = 16,
+ .ts_period = 24575, /* calibration period in ts. */
+ .us_per_ts_scale = PCAN_USB_TS_US_PER_TICK, /* us=(ts*scale) */
+ .us_per_ts_shift = PCAN_USB_TS_DIV_SHIFTER, /* >> shift */
+
+ /* message in/out endpoints */
+ .ep_msg_in = PCAN_USB_EP_MSGIN,
+ .ep_msg_out = {PCAN_USB_EP_MSGOUT},
+
+ /* size of rx/tx usb buffers */
+ .rx_buffer_size = PCAN_USB_RX_BUFFER_SIZE,
+ .tx_buffer_size = PCAN_USB_TX_BUFFER_SIZE,
+
+ /* device callbacks */
+ .intf_probe = pcan_usb_probe,
+ .dev_init = pcan_usb_init,
+ .dev_set_bus = pcan_usb_write_mode,
+ .dev_set_bittiming = pcan_usb_set_bittiming,
+ .dev_get_device_id = pcan_usb_get_device_id,
+ .dev_decode_buf = pcan_usb_decode_buf,
+ .dev_encode_msg = pcan_usb_encode_msg,
+ .dev_start = pcan_usb_start,
+ .dev_restart_async = pcan_usb_restart_async,
+};
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
new file mode 100644
index 000000000000..d2f91f737871
--- /dev/null
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -0,0 +1,951 @@
+/*
+ * CAN driver for PEAK System USB adapters
+ * Derived from the PCAN project file driver/src/pcan_usb_core.c
+ *
+ * Copyright (C) 2003-2010 PEAK System-Technik GmbH
+ * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ *
+ * Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include "pcan_usb_core.h"
+
+MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_DESCRIPTION("CAN driver for PEAK-System USB adapters");
+MODULE_LICENSE("GPL v2");
+
+/* Table of devices that work with this driver */
+static struct usb_device_id peak_usb_table[] = {
+ {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID)},
+ {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)},
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, peak_usb_table);
+
+/* List of supported PCAN-USB adapters (NULL terminated list) */
+static struct peak_usb_adapter *peak_usb_adapters_list[] = {
+ &pcan_usb,
+ &pcan_usb_pro,
+ NULL,
+};
+
+/*
+ * dump memory
+ */
+#define DUMP_WIDTH 16
+void dump_mem(char *prompt, void *p, int l)
+{
+ pr_info("%s dumping %s (%d bytes):\n",
+ PCAN_USB_DRIVER_NAME, prompt ? prompt : "memory", l);
+ print_hex_dump(KERN_INFO, PCAN_USB_DRIVER_NAME " ", DUMP_PREFIX_NONE,
+ DUMP_WIDTH, 1, p, l, false);
+}
+
+/*
+ * initialize a time_ref object with usb adapter own settings
+ */
+void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
+ struct peak_usb_adapter *adapter)
+{
+ if (time_ref) {
+ memset(time_ref, 0, sizeof(struct peak_time_ref));
+ time_ref->adapter = adapter;
+ }
+}
+
+static void peak_usb_add_us(struct timeval *tv, u32 delta_us)
+{
+ /* number of s. to add to final time */
+ u32 delta_s = delta_us / 1000000;
+
+ delta_us -= delta_s * 1000000;
+
+ tv->tv_usec += delta_us;
+ if (tv->tv_usec >= 1000000) {
+ tv->tv_usec -= 1000000;
+ delta_s++;
+ }
+ tv->tv_sec += delta_s;
+}
+
+/*
+ * sometimes, another 'now' may be more recent than the current one...
+ */
+void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
+{
+ time_ref->ts_dev_2 = ts_now;
+
+ /* should wait at least two passes before computing */
+ if (time_ref->tv_host.tv_sec > 0) {
+ u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1;
+
+ if (time_ref->ts_dev_2 < time_ref->ts_dev_1)
+ delta_ts &= (1 << time_ref->adapter->ts_used_bits) - 1;
+
+ time_ref->ts_total += delta_ts;
+ }
+}
+
+/*
+ * register device timestamp as now
+ */
+void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
+{
+ if (time_ref->tv_host_0.tv_sec == 0) {
+ /* use monotonic clock to correctly compute further deltas */
+ time_ref->tv_host_0 = ktime_to_timeval(ktime_get());
+ time_ref->tv_host.tv_sec = 0;
+ } else {
+ /*
+ * delta_us should not be >= 2^32 => delta_s should be < 4294;
+ * handle 32-bit wrapping here: if the count of seconds reaches 4200,
+ * reset the counters and change the time base
+ */
+ if (time_ref->tv_host.tv_sec != 0) {
+ u32 delta_s = time_ref->tv_host.tv_sec
+ - time_ref->tv_host_0.tv_sec;
+ if (delta_s > 4200) {
+ time_ref->tv_host_0 = time_ref->tv_host;
+ time_ref->ts_total = 0;
+ }
+ }
+
+ time_ref->tv_host = ktime_to_timeval(ktime_get());
+ time_ref->tick_count++;
+ }
+
+ time_ref->ts_dev_1 = time_ref->ts_dev_2;
+ peak_usb_update_ts_now(time_ref, ts_now);
+}
+
+/*
+ * compute timeval according to current ts and time_ref data
+ */
+void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
+ struct timeval *tv)
+{
+ /* protect from getting timeval before setting now */
+ if (time_ref->tv_host.tv_sec > 0) {
+ u64 delta_us;
+
+ delta_us = ts - time_ref->ts_dev_2;
+ if (ts < time_ref->ts_dev_2)
+ delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1;
+
+ delta_us += time_ref->ts_total;
+
+ delta_us *= time_ref->adapter->us_per_ts_scale;
+ delta_us >>= time_ref->adapter->us_per_ts_shift;
+
+ *tv = time_ref->tv_host_0;
+ peak_usb_add_us(tv, (u32)delta_us);
+ } else {
+ *tv = ktime_to_timeval(ktime_get());
+ }
+}
+
+/*
+ * callback for bulk Rx urb
+ */
+static void peak_usb_read_bulk_callback(struct urb *urb)
+{
+ struct peak_usb_device *dev = urb->context;
+ struct net_device *netdev;
+ int err;
+
+ netdev = dev->netdev;
+
+ if (!netif_device_present(netdev))
+ return;
+
+ /* check reception status */
+ switch (urb->status) {
+ case 0:
+ /* success */
+ break;
+
+ case -EILSEQ:
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ return;
+
+ default:
+ if (net_ratelimit())
+ netdev_err(netdev,
+ "Rx urb aborted (%d)\n", urb->status);
+ goto resubmit_urb;
+ }
+
+ /* protect from any incoming empty msgs */
+ if ((urb->actual_length > 0) && (dev->adapter->dev_decode_buf)) {
+ /* handle these kinds of msgs only if _start callback called */
+ if (dev->state & PCAN_USB_STATE_STARTED) {
+ err = dev->adapter->dev_decode_buf(dev, urb);
+ if (err)
+ dump_mem("received usb message",
+ urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ }
+ }
+
+resubmit_urb:
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_rcvbulkpipe(dev->udev, dev->ep_msg_in),
+ urb->transfer_buffer, dev->adapter->rx_buffer_size,
+ peak_usb_read_bulk_callback, dev);
+
+ usb_anchor_urb(urb, &dev->rx_submitted);
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (!err)
+ return;
+
+ usb_unanchor_urb(urb);
+
+ if (err == -ENODEV)
+ netif_device_detach(netdev);
+ else
+ netdev_err(netdev, "failed resubmitting read bulk urb: %d\n",
+ err);
+}
+
+/*
+ * callback for bulk Tx urb
+ */
+static void peak_usb_write_bulk_callback(struct urb *urb)
+{
+ struct peak_tx_urb_context *context = urb->context;
+ struct peak_usb_device *dev;
+ struct net_device *netdev;
+
+ BUG_ON(!context);
+
+ dev = context->dev;
+ netdev = dev->netdev;
+
+ atomic_dec(&dev->active_tx_urbs);
+
+ if (!netif_device_present(netdev))
+ return;
+
+ /* check tx status */
+ switch (urb->status) {
+ case 0:
+ /* transmission complete */
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += context->dlc;
+
+ /* prevent tx timeout */
+ netdev->trans_start = jiffies;
+ break;
+
+ default:
+ if (net_ratelimit())
+ netdev_err(netdev, "Tx urb aborted (%d)\n",
+ urb->status);
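+ /* fall through */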
+ case -EPROTO:
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+
+ break;
+ }
+
+ /* should always release echo skb and corresponding context */
+ can_get_echo_skb(netdev, context->echo_index);
+ context->echo_index = PCAN_USB_MAX_TX_URBS;
+
+ /* wake the tx queue up only in case of success */
+ if (!urb->status)
+ netif_wake_queue(netdev);
+}
+
+/*
+ * called by netdev to send one skb on the CAN interface.
+ */
+static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ struct peak_tx_urb_context *context = NULL;
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ struct urb *urb;
+ u8 *obuf;
+ int i, err;
+ size_t size = dev->adapter->tx_buffer_size;
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++)
+ if (dev->tx_contexts[i].echo_index == PCAN_USB_MAX_TX_URBS) {
+ context = dev->tx_contexts + i;
+ break;
+ }
+
+ if (!context) {
+ /* should not occur except during restart */
+ return NETDEV_TX_BUSY;
+ }
+
+ urb = context->urb;
+ obuf = urb->transfer_buffer;
+
+ err = dev->adapter->dev_encode_msg(dev, skb, obuf, &size);
+ if (err) {
+ if (net_ratelimit())
+ netdev_err(netdev, "packet dropped\n");
+ dev_kfree_skb(skb);
+ stats->tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ context->echo_index = i;
+ context->dlc = cf->can_dlc;
+
+ usb_anchor_urb(urb, &dev->tx_submitted);
+
+ can_put_echo_skb(skb, netdev, context->echo_index);
+
+ atomic_inc(&dev->active_tx_urbs);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ can_free_echo_skb(netdev, context->echo_index);
+
+ usb_unanchor_urb(urb);
+
+ /* this context is not used in fact */
+ context->echo_index = PCAN_USB_MAX_TX_URBS;
+
+ atomic_dec(&dev->active_tx_urbs);
+
+ switch (err) {
+ case -ENODEV:
+ netif_device_detach(netdev);
+ break;
+ default:
+ netdev_warn(netdev, "tx urb submitting failed err=%d\n",
+ err);
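+ /* fall through */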
+ case -ENOENT:
+ /* cable unplugged */
+ stats->tx_dropped++;
+ }
+ } else {
+ netdev->trans_start = jiffies;
+
+ /* slow down tx path */
+ if (atomic_read(&dev->active_tx_urbs) >= PCAN_USB_MAX_TX_URBS)
+ netif_stop_queue(netdev);
+ }
+
+ return NETDEV_TX_OK;
+}
+
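The transmit path above draws contexts from a fixed pool: a slot is free while its echo_index still equals the pool size, claiming it stores the real echo index, and the completion callback restores the sentinel. A simplified stand-alone sketch of that bookkeeping, with a hypothetical pool size and names:

	#include <stdio.h>

	#define DEMO_MAX_TX_URBS 10		/* hypothetical pool size */

	struct demo_tx_ctx { unsigned echo_index; };

	static struct demo_tx_ctx pool[DEMO_MAX_TX_URBS];

	/* a slot is free while echo_index still holds the sentinel (pool size) */
	static struct demo_tx_ctx *demo_claim_ctx(void)
	{
		unsigned i;

		for (i = 0; i < DEMO_MAX_TX_URBS; i++) {
			if (pool[i].echo_index == DEMO_MAX_TX_URBS) {
				pool[i].echo_index = i;		/* mark as in use */
				return &pool[i];
			}
		}
		return NULL;		/* every URB in flight: caller stops the queue */
	}

	static void demo_complete_ctx(struct demo_tx_ctx *ctx)
	{
		ctx->echo_index = DEMO_MAX_TX_URBS;	/* back to the free sentinel */
	}

	int main(void)
	{
		unsigned i;
		struct demo_tx_ctx *ctx;

		for (i = 0; i < DEMO_MAX_TX_URBS; i++)
			pool[i].echo_index = DEMO_MAX_TX_URBS;	/* all free initially */

		ctx = demo_claim_ctx();
		printf("claimed slot %u\n", ctx ? ctx->echo_index : DEMO_MAX_TX_URBS);
		demo_complete_ctx(ctx);
		return 0;
	}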
+/*
+ * start the CAN interface.
+ * Rx and Tx urbs are allocated here. Rx urbs are submitted here.
+ */
+static int peak_usb_start(struct peak_usb_device *dev)
+{
+ struct net_device *netdev = dev->netdev;
+ int err, i;
+
+ for (i = 0; i < PCAN_USB_MAX_RX_URBS; i++) {
+ struct urb *urb;
+ u8 *buf;
+
+ /* create a URB, and a buffer for it, to receive usb messages */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ netdev_err(netdev, "No memory left for URBs\n");
+ err = -ENOMEM;
+ break;
+ }
+
+ buf = kmalloc(dev->adapter->rx_buffer_size, GFP_KERNEL);
+ if (!buf) {
+ netdev_err(netdev, "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ err = -ENOMEM;
+ break;
+ }
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_rcvbulkpipe(dev->udev, dev->ep_msg_in),
+ buf, dev->adapter->rx_buffer_size,
+ peak_usb_read_bulk_callback, dev);
+
+ /* ask last usb_free_urb() to also kfree() transfer_buffer */
+ urb->transfer_flags |= URB_FREE_BUFFER;
+ usb_anchor_urb(urb, &dev->rx_submitted);
+
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err) {
+ if (err == -ENODEV)
+ netif_device_detach(dev->netdev);
+
+ usb_unanchor_urb(urb);
+ kfree(buf);
+ usb_free_urb(urb);
+ break;
+ }
+
+ /* drop reference, USB core will take care of freeing it */
+ usb_free_urb(urb);
+ }
+
+ /* did we submit any URBs? Warn if we were not able to submit all urbs */
+ if (i < PCAN_USB_MAX_RX_URBS) {
+ if (i == 0) {
+ netdev_err(netdev, "couldn't setup any rx URB\n");
+ return err;
+ }
+
+ netdev_warn(netdev, "rx performance may be slow\n");
+ }
+
+ /* pre-alloc tx buffers and corresponding urbs */
+ for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) {
+ struct peak_tx_urb_context *context;
+ struct urb *urb;
+ u8 *buf;
+
+ /* create a URB and a buffer for it, to transmit usb messages */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ netdev_err(netdev, "No memory left for URBs\n");
+ err = -ENOMEM;
+ break;
+ }
+
+ buf = kmalloc(dev->adapter->tx_buffer_size, GFP_KERNEL);
+ if (!buf) {
+ netdev_err(netdev, "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ err = -ENOMEM;
+ break;
+ }
+
+ context = dev->tx_contexts + i;
+ context->dev = dev;
+ context->urb = urb;
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_sndbulkpipe(dev->udev, dev->ep_msg_out),
+ buf, dev->adapter->tx_buffer_size,
+ peak_usb_write_bulk_callback, context);
+
+ /* ask last usb_free_urb() to also kfree() transfer_buffer */
+ urb->transfer_flags |= URB_FREE_BUFFER;
+ }
+
+ /* warn if we were not able to allocate enough tx contexts */
+ if (i < PCAN_USB_MAX_TX_URBS) {
+ if (i == 0) {
+ netdev_err(netdev, "couldn't setup any tx URB\n");
+ return err;
+ }
+
+ netdev_warn(netdev, "tx performance may be slow\n");
+ }
+
+ if (dev->adapter->dev_start) {
+ err = dev->adapter->dev_start(dev);
+ if (err)
+ goto failed;
+ }
+
+ dev->state |= PCAN_USB_STATE_STARTED;
+
+ /* can set bus on now */
+ if (dev->adapter->dev_set_bus) {
+ err = dev->adapter->dev_set_bus(dev, 1);
+ if (err)
+ goto failed;
+ }
+
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return 0;
+
+failed:
+ if (err == -ENODEV)
+ netif_device_detach(dev->netdev);
+
+ netdev_warn(netdev, "couldn't submit control: %d\n", err);
+
+ return err;
+}
+
+/*
+ * called by netdev to open the corresponding CAN interface.
+ */
+static int peak_usb_ndo_open(struct net_device *netdev)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ int err;
+
+ /* common open */
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ /* finally start device */
+ err = peak_usb_start(dev);
+ if (err) {
+ netdev_err(netdev, "couldn't start device: %d\n", err);
+ close_candev(netdev);
+ return err;
+ }
+
+ dev->open_time = jiffies;
+ netif_start_queue(netdev);
+
+ return 0;
+}
+
+/*
+ * unlink in-flight Rx and Tx urbs and free their memory.
+ */
+static void peak_usb_unlink_all_urbs(struct peak_usb_device *dev)
+{
+ int i;
+
+ /* free all Rx (submitted) urbs */
+ usb_kill_anchored_urbs(&dev->rx_submitted);
+
+ /* free unsubmitted Tx urbs first */
+ for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) {
+ struct urb *urb = dev->tx_contexts[i].urb;
+
+ if (!urb ||
+ dev->tx_contexts[i].echo_index != PCAN_USB_MAX_TX_URBS) {
+ /*
+ * this urb has already been released or is still submitted;
+ * let the usb core free it by itself
+ */
+ continue;
+ }
+
+ usb_free_urb(urb);
+ dev->tx_contexts[i].urb = NULL;
+ }
+
+ /* then free all submitted Tx urbs */
+ usb_kill_anchored_urbs(&dev->tx_submitted);
+ atomic_set(&dev->active_tx_urbs, 0);
+}
+
+/*
+ * called by netdev to close the corresponding CAN interface.
+ */
+static int peak_usb_ndo_stop(struct net_device *netdev)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+
+ dev->state &= ~PCAN_USB_STATE_STARTED;
+ netif_stop_queue(netdev);
+
+ /* unlink all pending urbs and free used memory */
+ peak_usb_unlink_all_urbs(dev);
+
+ if (dev->adapter->dev_stop)
+ dev->adapter->dev_stop(dev);
+
+ close_candev(netdev);
+
+ dev->open_time = 0;
+ dev->can.state = CAN_STATE_STOPPED;
+
+ /* can set bus off now */
+ if (dev->adapter->dev_set_bus) {
+ int err = dev->adapter->dev_set_bus(dev, 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * handle end of waiting for the device to reset
+ */
+void peak_usb_restart_complete(struct peak_usb_device *dev)
+{
+ /* finally MUST update can state */
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* netdev queue can be woken up now */
+ netif_wake_queue(dev->netdev);
+}
+
+void peak_usb_async_complete(struct urb *urb)
+{
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+}
+
+/*
+ * device (auto-)restart mechanism runs in a timer context =>
+ * MUST handle restart with asynchronous usb transfers
+ */
+static int peak_usb_restart(struct peak_usb_device *dev)
+{
+ struct urb *urb;
+ int err;
+ u8 *buf;
+
+ /*
+ * if device doesn't define any asynchronous restart handler, simply
+ * wake the netdev queue up
+ */
+ if (!dev->adapter->dev_restart_async) {
+ peak_usb_restart_complete(dev);
+ return 0;
+ }
+
+ /* first allocate a urb to handle the asynchronous steps */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netdev_err(dev->netdev, "no memory left for urb\n");
+ return -ENOMEM;
+ }
+
+ /* also allocate enough space for the commands to send */
+ buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_ATOMIC);
+ if (!buf) {
+ netdev_err(dev->netdev, "no memory left for async cmd\n");
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ /* call the device specific handler for the restart */
+ err = dev->adapter->dev_restart_async(dev, urb, buf);
+ if (!err)
+ return 0;
+
+ kfree(buf);
+ usb_free_urb(urb);
+
+ return err;
+}
+
+/*
+ * candev callback used to change CAN mode.
+ * Warning: this is called from a timer context!
+ */
+static int peak_usb_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ int err = 0;
+
+ if (!dev->open_time)
+ return -EINVAL;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ err = peak_usb_restart(dev);
+ if (err)
+ netdev_err(netdev, "couldn't start device (err %d)\n",
+ err);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+/*
+ * candev callback used to set device bitrate.
+ */
+static int peak_usb_set_bittiming(struct net_device *netdev)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ struct can_bittiming *bt = &dev->can.bittiming;
+
+ if (dev->adapter->dev_set_bittiming) {
+ int err = dev->adapter->dev_set_bittiming(dev, bt);
+
+ if (err)
+ netdev_info(netdev, "couldn't set bitrate (err %d)\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops peak_usb_netdev_ops = {
+ .ndo_open = peak_usb_ndo_open,
+ .ndo_stop = peak_usb_ndo_stop,
+ .ndo_start_xmit = peak_usb_ndo_start_xmit,
+};
+
+/*
+ * create one device which is attached to CAN controller #ctrl_idx of the
+ * usb adapter.
+ */
+static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
+ struct usb_interface *intf, int ctrl_idx)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ int sizeof_candev = peak_usb_adapter->sizeof_dev_private;
+ struct peak_usb_device *dev;
+ struct net_device *netdev;
+ int i, err;
+ u16 tmp16;
+
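+	/* the candev private data must be large enough for a peak_usb_device */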
+ if (sizeof_candev < sizeof(struct peak_usb_device))
+ sizeof_candev = sizeof(struct peak_usb_device);
+
+ netdev = alloc_candev(sizeof_candev, PCAN_USB_MAX_TX_URBS);
+ if (!netdev) {
+ dev_err(&intf->dev, "%s: couldn't alloc candev\n",
+ PCAN_USB_DRIVER_NAME);
+ return -ENOMEM;
+ }
+
+ dev = netdev_priv(netdev);
+
+ /* allocate a buffer large enough to send commands */
+ dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
+ if (!dev->cmd_buf) {
+ dev_err(&intf->dev, "%s: couldn't alloc cmd buffer\n",
+ PCAN_USB_DRIVER_NAME);
+ err = -ENOMEM;
+ goto lbl_set_intf_data;
+ }
+
+ dev->udev = usb_dev;
+ dev->netdev = netdev;
+ dev->adapter = peak_usb_adapter;
+ dev->ctrl_idx = ctrl_idx;
+ dev->state = PCAN_USB_STATE_CONNECTED;
+
+ dev->ep_msg_in = peak_usb_adapter->ep_msg_in;
+ dev->ep_msg_out = peak_usb_adapter->ep_msg_out[ctrl_idx];
+
+ dev->can.clock = peak_usb_adapter->clock;
+ dev->can.bittiming_const = &peak_usb_adapter->bittiming_const;
+ dev->can.do_set_bittiming = peak_usb_set_bittiming;
+ dev->can.do_set_mode = peak_usb_set_mode;
+ dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LISTENONLY;
+
+ netdev->netdev_ops = &peak_usb_netdev_ops;
+
+ netdev->flags |= IFF_ECHO; /* we support local echo */
+
+ init_usb_anchor(&dev->rx_submitted);
+
+ init_usb_anchor(&dev->tx_submitted);
+ atomic_set(&dev->active_tx_urbs, 0);
+
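+	/* mark all tx contexts as free (PCAN_USB_MAX_TX_URBS = not in use) */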
+ for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++)
+ dev->tx_contexts[i].echo_index = PCAN_USB_MAX_TX_URBS;
+
+ dev->prev_siblings = usb_get_intfdata(intf);
+ usb_set_intfdata(intf, dev);
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ err = register_candev(netdev);
+ if (err) {
+ dev_err(&intf->dev, "couldn't register CAN device: %d\n", err);
+ goto lbl_free_cmd_buf;
+ }
+
+ if (dev->prev_siblings)
+ (dev->prev_siblings)->next_siblings = dev;
+
+	/* store the hw revision (high byte of bcdDevice) in the device private data */
+ tmp16 = le16_to_cpu(usb_dev->descriptor.bcdDevice);
+ dev->device_rev = tmp16 >> 8;
+
+ if (dev->adapter->dev_init) {
+ err = dev->adapter->dev_init(dev);
+ if (err)
+ goto lbl_free_cmd_buf;
+ }
+
+ /* set bus off */
+ if (dev->adapter->dev_set_bus) {
+ err = dev->adapter->dev_set_bus(dev, 0);
+ if (err)
+ goto lbl_free_cmd_buf;
+ }
+
+ /* get device number early */
+ if (dev->adapter->dev_get_device_id)
+ dev->adapter->dev_get_device_id(dev, &dev->device_number);
+
+ netdev_info(netdev, "attached to %s channel %u (device %u)\n",
+ peak_usb_adapter->name, ctrl_idx, dev->device_number);
+
+ return 0;
+
+lbl_free_cmd_buf:
+ kfree(dev->cmd_buf);
+
+lbl_set_intf_data:
+ usb_set_intfdata(intf, dev->prev_siblings);
+ free_candev(netdev);
+
+ return err;
+}
+
+/*
+ * called by the usb core when the device is unplugged from the system
+ */
+static void peak_usb_disconnect(struct usb_interface *intf)
+{
+ struct peak_usb_device *dev;
+
+	/* unregister the netdev of each sibling channel */
+ for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) {
+ struct net_device *netdev = dev->netdev;
+ char name[IFNAMSIZ];
+
+ dev->state &= ~PCAN_USB_STATE_CONNECTED;
+ strncpy(name, netdev->name, IFNAMSIZ);
+
+ unregister_netdev(netdev);
+ free_candev(netdev);
+
+ kfree(dev->cmd_buf);
+ dev->next_siblings = NULL;
+ if (dev->adapter->dev_free)
+ dev->adapter->dev_free(dev);
+
+ dev_info(&intf->dev, "%s removed\n", name);
+ }
+
+ usb_set_intfdata(intf, NULL);
+}
+
+/*
+ * probe function for new PEAK-System devices
+ */
+static int peak_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct peak_usb_adapter *peak_usb_adapter, **pp;
+ int i, err = -ENOMEM;
+
+ usb_dev = interface_to_usbdev(intf);
+
+ /* get corresponding PCAN-USB adapter */
+ for (pp = peak_usb_adapters_list; *pp; pp++)
+ if ((*pp)->device_id == usb_dev->descriptor.idProduct)
+ break;
+
+ peak_usb_adapter = *pp;
+ if (!peak_usb_adapter) {
+		/* should never happen unless a device_id is misused in this file */
+ pr_err("%s: didn't find device id. 0x%x in devices list\n",
+ PCAN_USB_DRIVER_NAME, usb_dev->descriptor.idProduct);
+ return -ENODEV;
+ }
+
+ /* got corresponding adapter: check if it handles current interface */
+ if (peak_usb_adapter->intf_probe) {
+ err = peak_usb_adapter->intf_probe(intf);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < peak_usb_adapter->ctrl_count; i++) {
+ err = peak_usb_create_dev(peak_usb_adapter, intf, i);
+ if (err) {
+ /* deregister already created devices */
+ peak_usb_disconnect(intf);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver peak_usb_driver = {
+ .name = PCAN_USB_DRIVER_NAME,
+ .disconnect = peak_usb_disconnect,
+ .probe = peak_usb_probe,
+ .id_table = peak_usb_table,
+};
+
+static int __init peak_usb_init(void)
+{
+ int err;
+
+ /* register this driver with the USB subsystem */
+ err = usb_register(&peak_usb_driver);
+ if (err)
+ pr_err("%s: usb_register failed (err %d)\n",
+ PCAN_USB_DRIVER_NAME, err);
+
+ return err;
+}
+
+static int peak_usb_do_device_exit(struct device *d, void *arg)
+{
+ struct usb_interface *intf = to_usb_interface(d);
+ struct peak_usb_device *dev;
+
+	/* stop the netdev of each sibling channel */
+ for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) {
+ struct net_device *netdev = dev->netdev;
+
+ if (netif_device_present(netdev))
+ if (dev->adapter->dev_exit)
+ dev->adapter->dev_exit(dev);
+ }
+
+ return 0;
+}
+
+static void __exit peak_usb_exit(void)
+{
+ int err;
+
+	/* last chance to send any synchronous commands here */
+ err = driver_for_each_device(&peak_usb_driver.drvwrap.driver, NULL,
+ NULL, peak_usb_do_device_exit);
+ if (err)
+ pr_err("%s: failed to stop all can devices (err %d)\n",
+ PCAN_USB_DRIVER_NAME, err);
+
+ /* deregister this driver with the USB subsystem */
+ usb_deregister(&peak_usb_driver);
+
+ pr_info("%s: PCAN-USB interfaces driver unloaded\n",
+ PCAN_USB_DRIVER_NAME);
+}
+
+module_init(peak_usb_init);
+module_exit(peak_usb_exit);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
new file mode 100644
index 000000000000..a948c5a89401
--- /dev/null
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -0,0 +1,146 @@
+/*
+ * CAN driver for PEAK System USB adapters
+ * Derived from the PCAN project file driver/src/pcan_usb_core.c
+ *
+ * Copyright (C) 2003-2010 PEAK System-Technik GmbH
+ * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ *
+ * Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef PCAN_USB_CORE_H
+#define PCAN_USB_CORE_H
+
+/* PEAK-System vendor id. */
+#define PCAN_USB_VENDOR_ID 0x0c72
+
+/* supported device ids. */
+#define PCAN_USB_PRODUCT_ID 0x000c
+#define PCAN_USBPRO_PRODUCT_ID 0x000d
+
+#define PCAN_USB_DRIVER_NAME "peak_usb"
+
+/* number of urbs that are submitted for rx/tx per channel */
+#define PCAN_USB_MAX_RX_URBS 4
+#define PCAN_USB_MAX_TX_URBS 10
+
+/* maximum number of CAN channels per usb interface */
+#define PCAN_USB_MAX_CHANNEL 2
+
+/* maximum length of the usb commands sent to/received from the devices */
+#define PCAN_USB_MAX_CMD_LEN 32
+
+struct peak_usb_device;
+
+/* PEAK-System USB adapter descriptor */
+struct peak_usb_adapter {
+ char *name;
+ u32 device_id;
+ struct can_clock clock;
+ struct can_bittiming_const bittiming_const;
+ unsigned int ctrl_count;
+
+ int (*intf_probe)(struct usb_interface *intf);
+
+ int (*dev_init)(struct peak_usb_device *dev);
+ void (*dev_exit)(struct peak_usb_device *dev);
+ void (*dev_free)(struct peak_usb_device *dev);
+ int (*dev_open)(struct peak_usb_device *dev);
+ int (*dev_close)(struct peak_usb_device *dev);
+ int (*dev_set_bittiming)(struct peak_usb_device *dev,
+ struct can_bittiming *bt);
+ int (*dev_set_bus)(struct peak_usb_device *dev, u8 onoff);
+ int (*dev_get_device_id)(struct peak_usb_device *dev, u32 *device_id);
+ int (*dev_decode_buf)(struct peak_usb_device *dev, struct urb *urb);
+ int (*dev_encode_msg)(struct peak_usb_device *dev, struct sk_buff *skb,
+ u8 *obuf, size_t *size);
+ int (*dev_start)(struct peak_usb_device *dev);
+ int (*dev_stop)(struct peak_usb_device *dev);
+ int (*dev_restart_async)(struct peak_usb_device *dev, struct urb *urb,
+ u8 *buf);
+ u8 ep_msg_in;
+ u8 ep_msg_out[PCAN_USB_MAX_CHANNEL];
+ u8 ts_used_bits;
+ u32 ts_period;
+ u8 us_per_ts_shift;
+ u32 us_per_ts_scale;
+
+ int rx_buffer_size;
+ int tx_buffer_size;
+ int sizeof_dev_private;
+};
+
+extern struct peak_usb_adapter pcan_usb;
+extern struct peak_usb_adapter pcan_usb_pro;
+
+struct peak_time_ref {
+ struct timeval tv_host_0, tv_host;
+ u32 ts_dev_1, ts_dev_2;
+ u64 ts_total;
+ u32 tick_count;
+ struct peak_usb_adapter *adapter;
+};
+
+struct peak_tx_urb_context {
+ struct peak_usb_device *dev;
+ u32 echo_index;
+ u8 dlc;
+ struct urb *urb;
+};
+
+#define PCAN_USB_STATE_CONNECTED 0x00000001
+#define PCAN_USB_STATE_STARTED 0x00000002
+
+/* PEAK-System USB device */
+struct peak_usb_device {
+ struct can_priv can;
+ struct peak_usb_adapter *adapter;
+ unsigned int ctrl_idx;
+ int open_time;
+ u32 state;
+
+ struct sk_buff *echo_skb[PCAN_USB_MAX_TX_URBS];
+
+ struct usb_device *udev;
+ struct net_device *netdev;
+
+ atomic_t active_tx_urbs;
+ struct usb_anchor tx_submitted;
+ struct peak_tx_urb_context tx_contexts[PCAN_USB_MAX_TX_URBS];
+
+ u8 *cmd_buf;
+ struct usb_anchor rx_submitted;
+
+ u32 device_number;
+ u8 device_rev;
+
+ u8 ep_msg_in;
+ u8 ep_msg_out;
+
+ u16 bus_load;
+
+ struct peak_usb_device *prev_siblings;
+ struct peak_usb_device *next_siblings;
+};
+
+void dump_mem(char *prompt, void *p, int l);
+
+/* common timestamp management */
+void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
+ struct peak_usb_adapter *adapter);
+void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
+void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
+void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
+ struct timeval *tv);
+
+void peak_usb_async_complete(struct urb *urb);
+void peak_usb_restart_complete(struct peak_usb_device *dev);
+#endif
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
new file mode 100644
index 000000000000..5234586dff15
--- /dev/null
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -0,0 +1,1036 @@
+/*
+ * CAN driver for PEAK System PCAN-USB Pro adapter
+ * Derived from the PCAN project file driver/src/pcan_usbpro.c
+ *
+ * Copyright (C) 2003-2011 PEAK System-Technik GmbH
+ * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+#include <linux/module.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include "pcan_usb_core.h"
+#include "pcan_usb_pro.h"
+
+MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro adapter");
+
+/* PCAN-USB Pro Endpoints */
+#define PCAN_USBPRO_EP_CMDOUT 1
+#define PCAN_USBPRO_EP_CMDIN (PCAN_USBPRO_EP_CMDOUT | USB_DIR_IN)
+#define PCAN_USBPRO_EP_MSGOUT_0 2
+#define PCAN_USBPRO_EP_MSGIN (PCAN_USBPRO_EP_MSGOUT_0 | USB_DIR_IN)
+#define PCAN_USBPRO_EP_MSGOUT_1 3
+#define PCAN_USBPRO_EP_UNUSED (PCAN_USBPRO_EP_MSGOUT_1 | USB_DIR_IN)
+
+#define PCAN_USBPRO_CHANNEL_COUNT 2
+
+/* PCAN-USB Pro adapter internal clock (Hz) */
+#define PCAN_USBPRO_CRYSTAL_HZ 56000000
+
+/* PCAN-USB Pro command timeout (ms.) */
+#define PCAN_USBPRO_COMMAND_TIMEOUT 1000
+
+/* PCAN-USB Pro rx/tx buffers size */
+#define PCAN_USBPRO_RX_BUFFER_SIZE 1024
+#define PCAN_USBPRO_TX_BUFFER_SIZE 64
+
+#define PCAN_USBPRO_MSG_HEADER_LEN 4
+
+/* some command responses need to be re-submitted */
+#define PCAN_USBPRO_RSP_SUBMIT_MAX 2
+
+#define PCAN_USBPRO_RTR 0x01
+#define PCAN_USBPRO_EXT 0x02
+
+#define PCAN_USBPRO_CMD_BUFFER_SIZE 512
+
+/* handle device specific info used by the netdevices */
+struct pcan_usb_pro_interface {
+ struct peak_usb_device *dev[PCAN_USBPRO_CHANNEL_COUNT];
+ struct peak_time_ref time_ref;
+ int cm_ignore_count;
+ int dev_opened_count;
+};
+
+/* device information */
+struct pcan_usb_pro_device {
+ struct peak_usb_device dev;
+ struct pcan_usb_pro_interface *usb_if;
+ u32 cached_ccbt;
+};
+
+/* internal structure used to handle messages sent to bulk urb */
+struct pcan_usb_pro_msg {
+ u8 *rec_ptr;
+ int rec_buffer_size;
+ int rec_buffer_len;
+ union {
+ u16 *rec_cnt_rd;
+ u32 *rec_cnt;
+ u8 *rec_buffer;
+ } u;
+};
+
+/* record sizes table indexed by message id (8-bit value) */
+static u16 pcan_usb_pro_sizeof_rec[256] = {
+ [PCAN_USBPRO_SETBTR] = sizeof(struct pcan_usb_pro_btr),
+ [PCAN_USBPRO_SETBUSACT] = sizeof(struct pcan_usb_pro_busact),
+ [PCAN_USBPRO_SETSILENT] = sizeof(struct pcan_usb_pro_silent),
+ [PCAN_USBPRO_SETFILTR] = sizeof(struct pcan_usb_pro_filter),
+ [PCAN_USBPRO_SETTS] = sizeof(struct pcan_usb_pro_setts),
+ [PCAN_USBPRO_GETDEVID] = sizeof(struct pcan_usb_pro_devid),
+ [PCAN_USBPRO_SETLED] = sizeof(struct pcan_usb_pro_setled),
+ [PCAN_USBPRO_RXMSG8] = sizeof(struct pcan_usb_pro_rxmsg),
+ [PCAN_USBPRO_RXMSG4] = sizeof(struct pcan_usb_pro_rxmsg) - 4,
+ [PCAN_USBPRO_RXMSG0] = sizeof(struct pcan_usb_pro_rxmsg) - 8,
+ [PCAN_USBPRO_RXRTR] = sizeof(struct pcan_usb_pro_rxmsg) - 8,
+ [PCAN_USBPRO_RXSTATUS] = sizeof(struct pcan_usb_pro_rxstatus),
+ [PCAN_USBPRO_RXTS] = sizeof(struct pcan_usb_pro_rxts),
+ [PCAN_USBPRO_TXMSG8] = sizeof(struct pcan_usb_pro_txmsg),
+ [PCAN_USBPRO_TXMSG4] = sizeof(struct pcan_usb_pro_txmsg) - 4,
+ [PCAN_USBPRO_TXMSG0] = sizeof(struct pcan_usb_pro_txmsg) - 8,
+};
+
+/*
+ * initialize PCAN-USB Pro message data structure
+ */
+static u8 *pcan_msg_init(struct pcan_usb_pro_msg *pm, void *buffer_addr,
+ int buffer_size)
+{
+ if (buffer_size < PCAN_USBPRO_MSG_HEADER_LEN)
+ return NULL;
+
+ pm->u.rec_buffer = (u8 *)buffer_addr;
+ pm->rec_buffer_size = pm->rec_buffer_len = buffer_size;
+ pm->rec_ptr = pm->u.rec_buffer + PCAN_USBPRO_MSG_HEADER_LEN;
+
+ return pm->rec_ptr;
+}
+
+static u8 *pcan_msg_init_empty(struct pcan_usb_pro_msg *pm,
+ void *buffer_addr, int buffer_size)
+{
+ u8 *pr = pcan_msg_init(pm, buffer_addr, buffer_size);
+
+ if (pr) {
+ pm->rec_buffer_len = PCAN_USBPRO_MSG_HEADER_LEN;
+ *pm->u.rec_cnt = 0;
+ }
+ return pr;
+}
+
+/*
+ * add one record to a message being built
+ */
+static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...)
+{
+ int len, i;
+ u8 *pc;
+ va_list ap;
+
+ va_start(ap, id);
+
+ pc = pm->rec_ptr + 1;
+
+ i = 0;
+ switch (id) {
+ case PCAN_USBPRO_TXMSG8:
+ i += 4;
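+		/* fall through */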
+ case PCAN_USBPRO_TXMSG4:
+ i += 4;
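+		/* fall through */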
+ case PCAN_USBPRO_TXMSG0:
+ *pc++ = va_arg(ap, int);
+ *pc++ = va_arg(ap, int);
+ *pc++ = va_arg(ap, int);
+ *(u32 *)pc = cpu_to_le32(va_arg(ap, u32));
+ pc += 4;
+ memcpy(pc, va_arg(ap, int *), i);
+ pc += i;
+ break;
+
+ case PCAN_USBPRO_SETBTR:
+ case PCAN_USBPRO_GETDEVID:
+ *pc++ = va_arg(ap, int);
+ pc += 2;
+ *(u32 *)pc = cpu_to_le32(va_arg(ap, u32));
+ pc += 4;
+ break;
+
+ case PCAN_USBPRO_SETFILTR:
+ case PCAN_USBPRO_SETBUSACT:
+ case PCAN_USBPRO_SETSILENT:
+ *pc++ = va_arg(ap, int);
+ *(u16 *)pc = cpu_to_le16(va_arg(ap, int));
+ pc += 2;
+ break;
+
+ case PCAN_USBPRO_SETLED:
+ *pc++ = va_arg(ap, int);
+ *(u16 *)pc = cpu_to_le16(va_arg(ap, int));
+ pc += 2;
+ *(u32 *)pc = cpu_to_le32(va_arg(ap, u32));
+ pc += 4;
+ break;
+
+ case PCAN_USBPRO_SETTS:
+ pc++;
+ *(u16 *)pc = cpu_to_le16(va_arg(ap, int));
+ pc += 2;
+ break;
+
+ default:
+ pr_err("%s: %s(): unknown data type %02Xh (%d)\n",
+ PCAN_USB_DRIVER_NAME, __func__, id, id);
+ pc--;
+ break;
+ }
+
+ len = pc - pm->rec_ptr;
+ if (len > 0) {
+ *pm->u.rec_cnt = cpu_to_le32(*pm->u.rec_cnt+1);
+ *pm->rec_ptr = id;
+
+ pm->rec_ptr = pc;
+ pm->rec_buffer_len += len;
+ }
+
+ va_end(ap);
+
+ return len;
+}
+
+/*
+ * send PCAN-USB Pro command synchronously
+ */
+static int pcan_usb_pro_send_cmd(struct peak_usb_device *dev,
+ struct pcan_usb_pro_msg *pum)
+{
+ int actual_length;
+ int err;
+
+ /* usb device unregistered? */
+ if (!(dev->state & PCAN_USB_STATE_CONNECTED))
+ return 0;
+
+ err = usb_bulk_msg(dev->udev,
+ usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT),
+ pum->u.rec_buffer, pum->rec_buffer_len,
+ &actual_length, PCAN_USBPRO_COMMAND_TIMEOUT);
+ if (err)
+ netdev_err(dev->netdev, "sending command failure: %d\n", err);
+
+ return err;
+}
+
+/*
+ * wait for PCAN-USB Pro command response
+ */
+static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev,
+ struct pcan_usb_pro_msg *pum)
+{
+ u8 req_data_type, req_channel;
+ int actual_length;
+ int i, err = 0;
+
+ /* usb device unregistered? */
+ if (!(dev->state & PCAN_USB_STATE_CONNECTED))
+ return 0;
+
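+	/* the request's first record starts right after the 4-byte msg header */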
+ req_data_type = pum->u.rec_buffer[4];
+ req_channel = pum->u.rec_buffer[5];
+
+ *pum->u.rec_cnt = 0;
+ for (i = 0; !err && i < PCAN_USBPRO_RSP_SUBMIT_MAX; i++) {
+ struct pcan_usb_pro_msg rsp;
+ union pcan_usb_pro_rec *pr;
+ u32 r, rec_cnt;
+ u16 rec_len;
+ u8 *pc;
+
+ err = usb_bulk_msg(dev->udev,
+ usb_rcvbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDIN),
+ pum->u.rec_buffer, pum->rec_buffer_len,
+ &actual_length, PCAN_USBPRO_COMMAND_TIMEOUT);
+ if (err) {
+ netdev_err(dev->netdev, "waiting rsp error %d\n", err);
+ break;
+ }
+
+ if (actual_length == 0)
+ continue;
+
+ err = -EBADMSG;
+ if (actual_length < PCAN_USBPRO_MSG_HEADER_LEN) {
+ netdev_err(dev->netdev,
+ "got abnormal too small rsp (len=%d)\n",
+ actual_length);
+ break;
+ }
+
+ pc = pcan_msg_init(&rsp, pum->u.rec_buffer,
+ actual_length);
+
+ rec_cnt = le32_to_cpu(*rsp.u.rec_cnt);
+
+ /* loop on records stored into message */
+ for (r = 0; r < rec_cnt; r++) {
+ pr = (union pcan_usb_pro_rec *)pc;
+ rec_len = pcan_usb_pro_sizeof_rec[pr->data_type];
+ if (!rec_len) {
+ netdev_err(dev->netdev,
+ "got unprocessed record in msg\n");
+ dump_mem("rcvd rsp msg", pum->u.rec_buffer,
+ actual_length);
+ break;
+ }
+
+ /* check if response corresponds to request */
+ if (pr->data_type != req_data_type)
+ netdev_err(dev->netdev,
+ "got unwanted rsp %xh: ignored\n",
+ pr->data_type);
+
+ /* check if channel in response corresponds too */
+			else if ((req_channel != 0xff) &&
+				 (pr->bus_act.channel != req_channel))
+ netdev_err(dev->netdev,
+ "got rsp %xh but on chan%u: ignored\n",
+ req_data_type, pr->bus_act.channel);
+
+ /* got the response */
+ else
+ return 0;
+
+ /* otherwise, go on with next record in message */
+ pc += rec_len;
+ }
+ }
+
+ return (i >= PCAN_USBPRO_RSP_SUBMIT_MAX) ? -ERANGE : err;
+}
+
+static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
+ int req_value, void *req_addr, int req_size)
+{
+ int err;
+ u8 req_type;
+ unsigned int p;
+
+ /* usb device unregistered? */
+ if (!(dev->state & PCAN_USB_STATE_CONNECTED))
+ return 0;
+
+ memset(req_addr, '\0', req_size);
+
+ req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER;
+
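+	/* PCAN_USBPRO_REQ_FCT is sent to the device, other requests are read back */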
+ switch (req_id) {
+ case PCAN_USBPRO_REQ_FCT:
+ p = usb_sndctrlpipe(dev->udev, 0);
+ break;
+
+ default:
+ p = usb_rcvctrlpipe(dev->udev, 0);
+ req_type |= USB_DIR_IN;
+ break;
+ }
+
+ err = usb_control_msg(dev->udev, p, req_id, req_type, req_value, 0,
+ req_addr, req_size, 2 * USB_CTRL_GET_TIMEOUT);
+ if (err < 0) {
+ netdev_info(dev->netdev,
+ "unable to request usb[type=%d value=%d] err=%d\n",
+ req_id, req_value, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int pcan_usb_pro_set_ts(struct peak_usb_device *dev, u16 onoff)
+{
+ struct pcan_usb_pro_msg um;
+
+ pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN);
+ pcan_msg_add_rec(&um, PCAN_USBPRO_SETTS, onoff);
+
+ return pcan_usb_pro_send_cmd(dev, &um);
+}
+
+static int pcan_usb_pro_set_bitrate(struct peak_usb_device *dev, u32 ccbt)
+{
+ struct pcan_usb_pro_device *pdev =
+ container_of(dev, struct pcan_usb_pro_device, dev);
+ struct pcan_usb_pro_msg um;
+
+ pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN);
+ pcan_msg_add_rec(&um, PCAN_USBPRO_SETBTR, dev->ctrl_idx, ccbt);
+
+	/* cache the CCBT value so it can be reused before the next bus-on */
+ pdev->cached_ccbt = ccbt;
+
+ return pcan_usb_pro_send_cmd(dev, &um);
+}
+
+static int pcan_usb_pro_set_bus(struct peak_usb_device *dev, u8 onoff)
+{
+ struct pcan_usb_pro_msg um;
+
+	/* when setting the bus on, make sure the bitrate has been set first */
+ if (onoff) {
+ struct pcan_usb_pro_device *pdev =
+ container_of(dev, struct pcan_usb_pro_device, dev);
+
+ pcan_usb_pro_set_bitrate(dev, pdev->cached_ccbt);
+ }
+
+ pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN);
+ pcan_msg_add_rec(&um, PCAN_USBPRO_SETBUSACT, dev->ctrl_idx, onoff);
+
+ return pcan_usb_pro_send_cmd(dev, &um);
+}
+
+static int pcan_usb_pro_set_silent(struct peak_usb_device *dev, u8 onoff)
+{
+ struct pcan_usb_pro_msg um;
+
+ pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN);
+ pcan_msg_add_rec(&um, PCAN_USBPRO_SETSILENT, dev->ctrl_idx, onoff);
+
+ return pcan_usb_pro_send_cmd(dev, &um);
+}
+
+static int pcan_usb_pro_set_filter(struct peak_usb_device *dev, u16 filter_mode)
+{
+ struct pcan_usb_pro_msg um;
+
+ pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN);
+ pcan_msg_add_rec(&um, PCAN_USBPRO_SETFILTR, dev->ctrl_idx, filter_mode);
+
+ return pcan_usb_pro_send_cmd(dev, &um);
+}
+
+static int pcan_usb_pro_set_led(struct peak_usb_device *dev, u8 mode,
+ u32 timeout)
+{
+ struct pcan_usb_pro_msg um;
+
+ pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN);
+ pcan_msg_add_rec(&um, PCAN_USBPRO_SETLED, dev->ctrl_idx, mode, timeout);
+
+ return pcan_usb_pro_send_cmd(dev, &um);
+}
+
+static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev,
+ u32 *device_id)
+{
+ struct pcan_usb_pro_devid *pdn;
+ struct pcan_usb_pro_msg um;
+ int err;
+ u8 *pc;
+
+ pc = pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN);
+ pcan_msg_add_rec(&um, PCAN_USBPRO_GETDEVID, dev->ctrl_idx);
+
+ err = pcan_usb_pro_send_cmd(dev, &um);
+ if (err)
+ return err;
+
+ err = pcan_usb_pro_wait_rsp(dev, &um);
+ if (err)
+ return err;
+
+ pdn = (struct pcan_usb_pro_devid *)pc;
+ if (device_id)
+ *device_id = le32_to_cpu(pdn->serial_num);
+
+ return err;
+}
+
+static int pcan_usb_pro_set_bittiming(struct peak_usb_device *dev,
+ struct can_bittiming *bt)
+{
+ u32 ccbt;
+
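+	/*
+	 * CCBT layout: brp-1 in bits 9:0, tseg1-1 in bits 19:16, tseg2-1 in
+	 * bits 22:20, triple sampling in bit 23, sjw-1 in bits 25:24
+	 */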
+ ccbt = (dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 0x00800000 : 0;
+ ccbt |= (bt->sjw - 1) << 24;
+ ccbt |= (bt->phase_seg2 - 1) << 20;
+ ccbt |= (bt->prop_seg + bt->phase_seg1 - 1) << 16; /* = tseg1 */
+ ccbt |= bt->brp - 1;
+
+ netdev_info(dev->netdev, "setting ccbt=0x%08x\n", ccbt);
+
+ return pcan_usb_pro_set_bitrate(dev, ccbt);
+}
+
+static void pcan_usb_pro_restart_complete(struct urb *urb)
+{
+ /* can delete usb resources */
+ peak_usb_async_complete(urb);
+
+ /* notify candev and netdev */
+ peak_usb_restart_complete(urb->context);
+}
+
+/*
+ * handle the device restart, but in an asynchronous way
+ */
+static int pcan_usb_pro_restart_async(struct peak_usb_device *dev,
+ struct urb *urb, u8 *buf)
+{
+ struct pcan_usb_pro_msg um;
+
+ pcan_msg_init_empty(&um, buf, PCAN_USB_MAX_CMD_LEN);
+ pcan_msg_add_rec(&um, PCAN_USBPRO_SETBUSACT, dev->ctrl_idx, 1);
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT),
+ buf, PCAN_USB_MAX_CMD_LEN,
+ pcan_usb_pro_restart_complete, dev);
+
+ return usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+static void pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
+{
+ u8 buffer[16];
+
+ buffer[0] = 0;
+ buffer[1] = !!loaded;
+
+ pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT,
+ PCAN_USBPRO_FCT_DRVLD, buffer, sizeof(buffer));
+}
+
+static inline
+struct pcan_usb_pro_interface *pcan_usb_pro_dev_if(struct peak_usb_device *dev)
+{
+ struct pcan_usb_pro_device *pdev =
+ container_of(dev, struct pcan_usb_pro_device, dev);
+ return pdev->usb_if;
+}
+
+static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
+ struct pcan_usb_pro_rxmsg *rx)
+{
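+	/* rx->len packs the channel index (high nibble) and the dlc (low nibble) */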
+ const unsigned int ctrl_idx = (rx->len >> 4) & 0x0f;
+ struct peak_usb_device *dev = usb_if->dev[ctrl_idx];
+ struct net_device *netdev = dev->netdev;
+ struct can_frame *can_frame;
+ struct sk_buff *skb;
+ struct timeval tv;
+
+ skb = alloc_can_skb(netdev, &can_frame);
+ if (!skb)
+ return -ENOMEM;
+
+ can_frame->can_id = le32_to_cpu(rx->id);
+ can_frame->can_dlc = rx->len & 0x0f;
+
+ if (rx->flags & PCAN_USBPRO_EXT)
+ can_frame->can_id |= CAN_EFF_FLAG;
+
+ if (rx->flags & PCAN_USBPRO_RTR)
+ can_frame->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(can_frame->data, rx->data, can_frame->can_dlc);
+
+ peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv);
+ skb->tstamp = timeval_to_ktime(tv);
+
+ netif_rx(skb);
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += can_frame->can_dlc;
+
+ return 0;
+}
+
+static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
+ struct pcan_usb_pro_rxstatus *er)
+{
+ const u32 raw_status = le32_to_cpu(er->status);
+ const unsigned int ctrl_idx = (er->channel >> 4) & 0x0f;
+ struct peak_usb_device *dev = usb_if->dev[ctrl_idx];
+ struct net_device *netdev = dev->netdev;
+ struct can_frame *can_frame;
+ enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
+ u8 err_mask = 0;
+ struct sk_buff *skb;
+ struct timeval tv;
+
+ /* nothing should be sent while in BUS_OFF state */
+ if (dev->can.state == CAN_STATE_BUS_OFF)
+ return 0;
+
+ if (!raw_status) {
+ /* no error bit (back to active state) */
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ return 0;
+ }
+
+ if (raw_status & (PCAN_USBPRO_STATUS_OVERRUN |
+ PCAN_USBPRO_STATUS_QOVERRUN)) {
+ /* trick to bypass next comparison and process other errors */
+ new_state = CAN_STATE_MAX;
+ }
+
+ if (raw_status & PCAN_USBPRO_STATUS_BUS) {
+ new_state = CAN_STATE_BUS_OFF;
+ } else if (raw_status & PCAN_USBPRO_STATUS_ERROR) {
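+		/* err_frm: rx error counter in bits 23:16, tx counter in bits 31:24 */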
+ u32 rx_err_cnt = (le32_to_cpu(er->err_frm) & 0x00ff0000) >> 16;
+ u32 tx_err_cnt = (le32_to_cpu(er->err_frm) & 0xff000000) >> 24;
+
+ if (rx_err_cnt > 127)
+ err_mask |= CAN_ERR_CRTL_RX_PASSIVE;
+ else if (rx_err_cnt > 96)
+ err_mask |= CAN_ERR_CRTL_RX_WARNING;
+
+ if (tx_err_cnt > 127)
+ err_mask |= CAN_ERR_CRTL_TX_PASSIVE;
+ else if (tx_err_cnt > 96)
+ err_mask |= CAN_ERR_CRTL_TX_WARNING;
+
+ if (err_mask & (CAN_ERR_CRTL_RX_WARNING |
+ CAN_ERR_CRTL_TX_WARNING))
+ new_state = CAN_STATE_ERROR_WARNING;
+ else if (err_mask & (CAN_ERR_CRTL_RX_PASSIVE |
+ CAN_ERR_CRTL_TX_PASSIVE))
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ }
+
+	/* do not post any error frame if the current state didn't change */
+ if (dev->can.state == new_state)
+ return 0;
+
+ /* allocate an skb to store the error frame */
+ skb = alloc_can_err_skb(netdev, &can_frame);
+ if (!skb)
+ return -ENOMEM;
+
+ switch (new_state) {
+ case CAN_STATE_BUS_OFF:
+ can_frame->can_id |= CAN_ERR_BUSOFF;
+ can_bus_off(netdev);
+ break;
+
+ case CAN_STATE_ERROR_PASSIVE:
+ can_frame->can_id |= CAN_ERR_CRTL;
+ can_frame->data[1] |= err_mask;
+ dev->can.can_stats.error_passive++;
+ break;
+
+ case CAN_STATE_ERROR_WARNING:
+ can_frame->can_id |= CAN_ERR_CRTL;
+ can_frame->data[1] |= err_mask;
+ dev->can.can_stats.error_warning++;
+ break;
+
+ case CAN_STATE_ERROR_ACTIVE:
+ break;
+
+ default:
+ /* CAN_STATE_MAX (trick to handle other errors) */
+ if (raw_status & PCAN_USBPRO_STATUS_OVERRUN) {
+ can_frame->can_id |= CAN_ERR_PROT;
+ can_frame->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ netdev->stats.rx_over_errors++;
+ netdev->stats.rx_errors++;
+ }
+
+ if (raw_status & PCAN_USBPRO_STATUS_QOVERRUN) {
+ can_frame->can_id |= CAN_ERR_CRTL;
+ can_frame->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ netdev->stats.rx_over_errors++;
+ netdev->stats.rx_errors++;
+ }
+
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ break;
+ }
+
+ dev->can.state = new_state;
+
+ peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
+ skb->tstamp = timeval_to_ktime(tv);
+ netif_rx(skb);
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += can_frame->can_dlc;
+
+ return 0;
+}
+
+static void pcan_usb_pro_handle_ts(struct pcan_usb_pro_interface *usb_if,
+ struct pcan_usb_pro_rxts *ts)
+{
+	/* ignore ts messages until the device clock has stabilized */
+ if (usb_if->cm_ignore_count > 0)
+ usb_if->cm_ignore_count--;
+ else
+ peak_usb_set_ts_now(&usb_if->time_ref,
+ le32_to_cpu(ts->ts64[1]));
+}
+
+/*
+ * callback for bulk IN urb
+ */
+static int pcan_usb_pro_decode_buf(struct peak_usb_device *dev, struct urb *urb)
+{
+ struct pcan_usb_pro_interface *usb_if = pcan_usb_pro_dev_if(dev);
+ struct net_device *netdev = dev->netdev;
+ struct pcan_usb_pro_msg usb_msg;
+ u8 *rec_ptr, *msg_end;
+ u16 rec_cnt;
+ int err = 0;
+
+ rec_ptr = pcan_msg_init(&usb_msg, urb->transfer_buffer,
+ urb->actual_length);
+ if (!rec_ptr) {
+ netdev_err(netdev, "bad msg hdr len %d\n", urb->actual_length);
+ return -EINVAL;
+ }
+
+ /* loop reading all the records from the incoming message */
+ msg_end = urb->transfer_buffer + urb->actual_length;
+ rec_cnt = le16_to_cpu(*usb_msg.u.rec_cnt_rd);
+ for (; rec_cnt > 0; rec_cnt--) {
+ union pcan_usb_pro_rec *pr = (union pcan_usb_pro_rec *)rec_ptr;
+ u16 sizeof_rec = pcan_usb_pro_sizeof_rec[pr->data_type];
+
+ if (!sizeof_rec) {
+ netdev_err(netdev,
+ "got unsupported rec in usb msg:\n");
+ err = -ENOTSUPP;
+ break;
+ }
+
+		/* check whether the record extends beyond the current packet */
+ if (rec_ptr + sizeof_rec > msg_end) {
+ netdev_err(netdev,
+ "got frag rec: should inc usb rx buf size\n");
+ err = -EBADMSG;
+ break;
+ }
+
+ switch (pr->data_type) {
+ case PCAN_USBPRO_RXMSG8:
+ case PCAN_USBPRO_RXMSG4:
+ case PCAN_USBPRO_RXMSG0:
+ case PCAN_USBPRO_RXRTR:
+ err = pcan_usb_pro_handle_canmsg(usb_if, &pr->rx_msg);
+ if (err < 0)
+ goto fail;
+ break;
+
+ case PCAN_USBPRO_RXSTATUS:
+ err = pcan_usb_pro_handle_error(usb_if, &pr->rx_status);
+ if (err < 0)
+ goto fail;
+ break;
+
+ case PCAN_USBPRO_RXTS:
+ pcan_usb_pro_handle_ts(usb_if, &pr->rx_ts);
+ break;
+
+ default:
+ netdev_err(netdev,
+ "unhandled rec type 0x%02x (%d): ignored\n",
+ pr->data_type, pr->data_type);
+ break;
+ }
+
+ rec_ptr += sizeof_rec;
+ }
+
+fail:
+ if (err)
+ dump_mem("received msg",
+ urb->transfer_buffer, urb->actual_length);
+
+ return err;
+}
+
+static int pcan_usb_pro_encode_msg(struct peak_usb_device *dev,
+ struct sk_buff *skb, u8 *obuf, size_t *size)
+{
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u8 data_type, len, flags;
+ struct pcan_usb_pro_msg usb_msg;
+
+ pcan_msg_init_empty(&usb_msg, obuf, *size);
+
+ if ((cf->can_id & CAN_RTR_FLAG) || (cf->can_dlc == 0))
+ data_type = PCAN_USBPRO_TXMSG0;
+ else if (cf->can_dlc <= 4)
+ data_type = PCAN_USBPRO_TXMSG4;
+ else
+ data_type = PCAN_USBPRO_TXMSG8;
+
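+	/* len byte: channel index in the high nibble, dlc in the low nibble */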
+ len = (dev->ctrl_idx << 4) | (cf->can_dlc & 0x0f);
+
+ flags = 0;
+ if (cf->can_id & CAN_EFF_FLAG)
+ flags |= 0x02;
+ if (cf->can_id & CAN_RTR_FLAG)
+ flags |= 0x01;
+
+ pcan_msg_add_rec(&usb_msg, data_type, 0, flags, len, cf->can_id,
+ cf->data);
+
+ *size = usb_msg.rec_buffer_len;
+
+ return 0;
+}
+
+static int pcan_usb_pro_start(struct peak_usb_device *dev)
+{
+ struct pcan_usb_pro_device *pdev =
+ container_of(dev, struct pcan_usb_pro_device, dev);
+ int err;
+
+ err = pcan_usb_pro_set_silent(dev,
+ dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY);
+ if (err)
+ return err;
+
+ /* filter mode: 0-> All OFF; 1->bypass */
+ err = pcan_usb_pro_set_filter(dev, 1);
+ if (err)
+ return err;
+
+ /* opening first device: */
+ if (pdev->usb_if->dev_opened_count == 0) {
+ /* reset time_ref */
+ peak_usb_init_time_ref(&pdev->usb_if->time_ref, &pcan_usb_pro);
+
+ /* ask device to send ts messages */
+ err = pcan_usb_pro_set_ts(dev, 1);
+ }
+
+ pdev->usb_if->dev_opened_count++;
+
+ return err;
+}
+
+/*
+ * stop interface
+ * (last chance before the bus is set off)
+ */
+static int pcan_usb_pro_stop(struct peak_usb_device *dev)
+{
+ struct pcan_usb_pro_device *pdev =
+ container_of(dev, struct pcan_usb_pro_device, dev);
+
+ /* turn off ts msgs for that interface if no other dev opened */
+ if (pdev->usb_if->dev_opened_count == 1)
+ pcan_usb_pro_set_ts(dev, 0);
+
+ pdev->usb_if->dev_opened_count--;
+
+ return 0;
+}
+
+/*
+ * called when probing to initialize a device object.
+ */
+static int pcan_usb_pro_init(struct peak_usb_device *dev)
+{
+ struct pcan_usb_pro_interface *usb_if;
+ struct pcan_usb_pro_device *pdev =
+ container_of(dev, struct pcan_usb_pro_device, dev);
+
+ /* do this for 1st channel only */
+ if (!dev->prev_siblings) {
+ struct pcan_usb_pro_fwinfo fi;
+ struct pcan_usb_pro_blinfo bi;
+ int err;
+
+		/* allocate the structure shared by all netdevices, attached to the first one */
+ usb_if = kzalloc(sizeof(struct pcan_usb_pro_interface),
+ GFP_KERNEL);
+ if (!usb_if)
+ return -ENOMEM;
+
+ /* number of ts msgs to ignore before taking one into account */
+ usb_if->cm_ignore_count = 5;
+
+ /*
+ * explicit use of dev_xxx() instead of netdev_xxx() here:
+		 * the information displayed relates to the device itself, not
+ * to the canx netdevices.
+ */
+ err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
+ PCAN_USBPRO_INFO_FW,
+ &fi, sizeof(fi));
+ if (err) {
+ dev_err(dev->netdev->dev.parent,
+ "unable to read %s firmware info (err %d)\n",
+ pcan_usb_pro.name, err);
+ return err;
+ }
+
+ err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
+ PCAN_USBPRO_INFO_BL,
+ &bi, sizeof(bi));
+ if (err) {
+ dev_err(dev->netdev->dev.parent,
+ "unable to read %s bootloader info (err %d)\n",
+ pcan_usb_pro.name, err);
+ return err;
+ }
+
+ dev_info(dev->netdev->dev.parent,
+ "PEAK-System %s hwrev %u serial %08X.%08X (%u channels)\n",
+ pcan_usb_pro.name,
+ bi.hw_rev, bi.serial_num_hi, bi.serial_num_lo,
+ pcan_usb_pro.ctrl_count);
+
+ /* tell the device the can driver is running */
+ pcan_usb_pro_drv_loaded(dev, 1);
+ } else {
+ usb_if = pcan_usb_pro_dev_if(dev->prev_siblings);
+ }
+
+ pdev->usb_if = usb_if;
+ usb_if->dev[dev->ctrl_idx] = dev;
+
+ /* set LED in default state (end of init phase) */
+ pcan_usb_pro_set_led(dev, 0, 1);
+
+ return 0;
+}
+
+static void pcan_usb_pro_exit(struct peak_usb_device *dev)
+{
+ struct pcan_usb_pro_device *pdev =
+ container_of(dev, struct pcan_usb_pro_device, dev);
+
+ /*
+	 * if rmmod is called before the device is unplugged and the channel
+	 * is still running, reset things before leaving
+ */
+ if (dev->can.state != CAN_STATE_STOPPED) {
+ /* set bus off on the corresponding channel */
+ pcan_usb_pro_set_bus(dev, 0);
+ }
+
+ /* if channel #0 (only) */
+ if (dev->ctrl_idx == 0) {
+		/* turn off calibration messages if any device was opened */
+ if (pdev->usb_if->dev_opened_count > 0)
+ pcan_usb_pro_set_ts(dev, 0);
+
+ /* tell the PCAN-USB Pro device the driver is being unloaded */
+ pcan_usb_pro_drv_loaded(dev, 0);
+ }
+}
+
+/*
+ * called when PCAN-USB Pro adapter is unplugged
+ */
+static void pcan_usb_pro_free(struct peak_usb_device *dev)
+{
+ /* last device: can free pcan_usb_pro_interface object now */
+ if (!dev->prev_siblings && !dev->next_siblings)
+ kfree(pcan_usb_pro_dev_if(dev));
+}
+
+/*
+ * probe function for new PCAN-USB Pro usb interface
+ */
+static int pcan_usb_pro_probe(struct usb_interface *intf)
+{
+ struct usb_host_interface *if_desc;
+ int i;
+
+ if_desc = intf->altsetting;
+
+ /* check interface endpoint addresses */
+ for (i = 0; i < if_desc->desc.bNumEndpoints; i++) {
+ struct usb_endpoint_descriptor *ep = &if_desc->endpoint[i].desc;
+
+ /*
+		 * below is the list of valid ep addresses. Any other ep address
+		 * identifies a non-CAN interface, so no device is created
+ */
+ switch (ep->bEndpointAddress) {
+ case PCAN_USBPRO_EP_CMDOUT:
+ case PCAN_USBPRO_EP_CMDIN:
+ case PCAN_USBPRO_EP_MSGOUT_0:
+ case PCAN_USBPRO_EP_MSGOUT_1:
+ case PCAN_USBPRO_EP_MSGIN:
+ case PCAN_USBPRO_EP_UNUSED:
+ break;
+ default:
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * describe the PCAN-USB Pro adapter
+ */
+struct peak_usb_adapter pcan_usb_pro = {
+ .name = "PCAN-USB Pro",
+ .device_id = PCAN_USBPRO_PRODUCT_ID,
+ .ctrl_count = PCAN_USBPRO_CHANNEL_COUNT,
+ .clock = {
+ .freq = PCAN_USBPRO_CRYSTAL_HZ,
+ },
+ .bittiming_const = {
+ .name = "pcan_usb_pro",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+ },
+
+ /* size of device private data */
+ .sizeof_dev_private = sizeof(struct pcan_usb_pro_device),
+
+ /* timestamps usage */
+ .ts_used_bits = 32,
+ .ts_period = 1000000, /* calibration period in ts. */
+ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
+ .us_per_ts_shift = 0,
+
+	/* message in/out endpoints */
+ .ep_msg_in = PCAN_USBPRO_EP_MSGIN,
+ .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0, PCAN_USBPRO_EP_MSGOUT_1},
+
+ /* size of rx/tx usb buffers */
+ .rx_buffer_size = PCAN_USBPRO_RX_BUFFER_SIZE,
+ .tx_buffer_size = PCAN_USBPRO_TX_BUFFER_SIZE,
+
+ /* device callbacks */
+ .intf_probe = pcan_usb_pro_probe,
+ .dev_init = pcan_usb_pro_init,
+ .dev_exit = pcan_usb_pro_exit,
+ .dev_free = pcan_usb_pro_free,
+ .dev_set_bus = pcan_usb_pro_set_bus,
+ .dev_set_bittiming = pcan_usb_pro_set_bittiming,
+ .dev_get_device_id = pcan_usb_pro_get_device_id,
+ .dev_decode_buf = pcan_usb_pro_decode_buf,
+ .dev_encode_msg = pcan_usb_pro_encode_msg,
+ .dev_start = pcan_usb_pro_start,
+ .dev_stop = pcan_usb_pro_stop,
+ .dev_restart_async = pcan_usb_pro_restart_async,
+};
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
new file mode 100644
index 000000000000..a869918c5620
--- /dev/null
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -0,0 +1,178 @@
+/*
+ * CAN driver for PEAK System PCAN-USB Pro adapter
+ * Derived from the PCAN project file driver/src/pcan_usbpro_fw.h
+ *
+ * Copyright (C) 2003-2011 PEAK System-Technik GmbH
+ * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef PCAN_USB_PRO_H
+#define PCAN_USB_PRO_H
+
+/*
+ * USB Vendor request data types
+ */
+#define PCAN_USBPRO_REQ_INFO 0
+#define PCAN_USBPRO_REQ_FCT 2
+
+/* Vendor Request value for XXX_INFO */
+#define PCAN_USBPRO_INFO_BL 0
+#define PCAN_USBPRO_INFO_FW 1
+
+/* Vendor Request value for XXX_FCT */
+#define PCAN_USBPRO_FCT_DRVLD 5 /* tell device driver is loaded */
+
+/* PCAN_USBPRO_INFO_BL vendor request record type */
+struct __packed pcan_usb_pro_blinfo {
+ u32 ctrl_type;
+ u8 version[4];
+ u8 day;
+ u8 month;
+ u8 year;
+ u8 dummy;
+ u32 serial_num_hi;
+ u32 serial_num_lo;
+ u32 hw_type;
+ u32 hw_rev;
+};
+
+/* PCAN_USBPRO_INFO_FW vendor request record type */
+struct __packed pcan_usb_pro_fwinfo {
+ u32 ctrl_type;
+ u8 version[4];
+ u8 day;
+ u8 month;
+ u8 year;
+ u8 dummy;
+ u32 fw_type;
+};
+
+/*
+ * USB Command record types
+ */
+#define PCAN_USBPRO_SETBTR 0x02
+#define PCAN_USBPRO_SETBUSACT 0x04
+#define PCAN_USBPRO_SETSILENT 0x05
+#define PCAN_USBPRO_SETFILTR 0x0a
+#define PCAN_USBPRO_SETTS 0x10
+#define PCAN_USBPRO_GETDEVID 0x12
+#define PCAN_USBPRO_SETLED 0x1C
+#define PCAN_USBPRO_RXMSG8 0x80
+#define PCAN_USBPRO_RXMSG4 0x81
+#define PCAN_USBPRO_RXMSG0 0x82
+#define PCAN_USBPRO_RXRTR 0x83
+#define PCAN_USBPRO_RXSTATUS 0x84
+#define PCAN_USBPRO_RXTS 0x85
+#define PCAN_USBPRO_TXMSG8 0x41
+#define PCAN_USBPRO_TXMSG4 0x42
+#define PCAN_USBPRO_TXMSG0 0x43
+
+/* record structures */
+struct __packed pcan_usb_pro_btr {
+ u8 data_type;
+ u8 channel;
+ u16 dummy;
+ u32 CCBT;
+};
+
+struct __packed pcan_usb_pro_busact {
+ u8 data_type;
+ u8 channel;
+ u16 onoff;
+};
+
+struct __packed pcan_usb_pro_silent {
+ u8 data_type;
+ u8 channel;
+ u16 onoff;
+};
+
+struct __packed pcan_usb_pro_filter {
+ u8 data_type;
+ u8 dummy;
+ u16 filter_mode;
+};
+
+struct __packed pcan_usb_pro_setts {
+ u8 data_type;
+ u8 dummy;
+ u16 mode;
+};
+
+struct __packed pcan_usb_pro_devid {
+ u8 data_type;
+ u8 channel;
+ u16 dummy;
+ u32 serial_num;
+};
+
+struct __packed pcan_usb_pro_setled {
+ u8 data_type;
+ u8 channel;
+ u16 mode;
+ u32 timeout;
+};
+
+struct __packed pcan_usb_pro_rxmsg {
+ u8 data_type;
+ u8 client;
+ u8 flags;
+ u8 len;
+ u32 ts32;
+ u32 id;
+
+ u8 data[8];
+};
+
+#define PCAN_USBPRO_STATUS_ERROR 0x0001
+#define PCAN_USBPRO_STATUS_BUS 0x0002
+#define PCAN_USBPRO_STATUS_OVERRUN 0x0004
+#define PCAN_USBPRO_STATUS_QOVERRUN 0x0008
+
+struct __packed pcan_usb_pro_rxstatus {
+ u8 data_type;
+ u8 channel;
+ u16 status;
+ u32 ts32;
+ u32 err_frm;
+};
+
+struct __packed pcan_usb_pro_rxts {
+ u8 data_type;
+ u8 dummy[3];
+ u32 ts64[2];
+};
+
+struct __packed pcan_usb_pro_txmsg {
+ u8 data_type;
+ u8 client;
+ u8 flags;
+ u8 len;
+ u32 id;
+ u8 data[8];
+};
+
+union pcan_usb_pro_rec {
+ u8 data_type;
+ struct pcan_usb_pro_btr btr;
+ struct pcan_usb_pro_busact bus_act;
+ struct pcan_usb_pro_silent silent_mode;
+ struct pcan_usb_pro_filter filter_mode;
+ struct pcan_usb_pro_setts ts;
+ struct pcan_usb_pro_devid dev_id;
+ struct pcan_usb_pro_setled set_led;
+ struct pcan_usb_pro_rxmsg rx_msg;
+ struct pcan_usb_pro_rxstatus rx_status;
+ struct pcan_usb_pro_rxts rx_ts;
+ struct pcan_usb_pro_txmsg tx_msg;
+};
+
+#endif
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 087648ea1edb..d5c6d92f1ee7 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -47,6 +47,7 @@ static int dummy_set_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
return 0;
}
@@ -135,7 +136,7 @@ static void dummy_setup(struct net_device *dev)
dev->flags &= ~IFF_MULTICAST;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
}
static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
diff --git a/drivers/net/ethernet/3com/3c501.c b/drivers/net/ethernet/3com/3c501.c
index 68da81d476f3..bf73e1a02293 100644
--- a/drivers/net/ethernet/3com/3c501.c
+++ b/drivers/net/ethernet/3com/3c501.c
@@ -702,7 +702,7 @@ static void el_receive(struct net_device *dev)
*/
outb(AX_SYS, AX_CMD);
- skb = dev_alloc_skb(pkt_len+2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
/*
* Start of frame
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 92053e6fc980..41719da2e178 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -1066,7 +1066,7 @@ el3_rx(struct net_device *dev)
short pkt_len = rx_status & 0x7ff;
struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_len+5);
+ skb = netdev_alloc_skb(dev, pkt_len + 5);
if (el3_debug > 4)
pr_debug("Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index f67a5d3a200c..59e1e001bc3f 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -826,11 +826,10 @@ static int corkscrew_open(struct net_device *dev)
vp->rx_ring[i].next = 0;
vp->rx_ring[i].status = 0; /* Clear complete bit. */
vp->rx_ring[i].length = PKT_BUF_SZ | 0x80000000;
- skb = dev_alloc_skb(PKT_BUF_SZ);
+ skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
vp->rx_skbuff[i] = skb;
if (skb == NULL)
break; /* Bad news! */
- skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
vp->rx_ring[i].addr = isa_virt_to_bus(skb->data);
}
@@ -1295,7 +1294,7 @@ static int corkscrew_rx(struct net_device *dev)
short pkt_len = rx_status & 0x1fff;
struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_len + 5 + 2);
+ skb = netdev_alloc_skb(dev, pkt_len + 5 + 2);
if (corkscrew_debug > 4)
pr_debug("Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
@@ -1368,7 +1367,7 @@ static int boomerang_rx(struct net_device *dev)
/* Check if the packet is long enough to just accept without
copying to a properly sized skbuff. */
if (pkt_len < rx_copybreak &&
- (skb = dev_alloc_skb(pkt_len + 4)) != NULL) {
+ (skb = netdev_alloc_skb(dev, pkt_len + 4)) != NULL) {
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
/* 'skb_put()' points to the start of sk_buff data area. */
memcpy(skb_put(skb, pkt_len),
@@ -1403,10 +1402,9 @@ static int boomerang_rx(struct net_device *dev)
struct sk_buff *skb;
entry = vp->dirty_rx % RX_RING_SIZE;
if (vp->rx_skbuff[entry] == NULL) {
- skb = dev_alloc_skb(PKT_BUF_SZ);
+ skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
if (skb == NULL)
break; /* Bad news! */
- skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
vp->rx_ring[entry].addr = isa_virt_to_bus(skb->data);
vp->rx_skbuff[entry] = skb;
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index 9c01bc9235b3..e61b2f82ba3a 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -1012,7 +1012,7 @@ static int el3_rx(struct net_device *dev, int worklimit)
short pkt_len = rx_status & 0x7ff;
struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_len+5);
+ skb = netdev_alloc_skb(dev, pkt_len + 5);
pr_debug(" Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index da410f036869..b23253b9f742 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -819,7 +819,7 @@ static int el3_rx(struct net_device *dev)
short pkt_len = rx_status & 0x7ff;
struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_len+5);
+ skb = netdev_alloc_skb(dev, pkt_len + 5);
netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index f9b74c0a8492..e463d1036829 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1121,10 +1121,9 @@ static int __devinit vortex_probe1(struct device *gendev,
dev = alloc_etherdev(sizeof(*vp));
retval = -ENOMEM;
- if (!dev) {
- pr_err(PFX "unable to allocate etherdev, aborting\n");
+ if (!dev)
goto out;
- }
+
SET_NETDEV_DEV(dev, gendev);
vp = netdev_priv(dev);
@@ -2500,7 +2499,7 @@ static int vortex_rx(struct net_device *dev)
int pkt_len = rx_status & 0x1fff;
struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_len + 5);
+ skb = netdev_alloc_skb(dev, pkt_len + 5);
if (vortex_debug > 4)
pr_debug("Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
@@ -2579,7 +2578,8 @@ boomerang_rx(struct net_device *dev)
/* Check if the packet is long enough to just accept without
copying to a properly sized skbuff. */
- if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ if (pkt_len < rx_copybreak &&
+ (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
/* 'skb_put()' points to the start of sk_buff data area. */
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index a8bb30cf512d..bad4fa6815c5 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -97,7 +97,7 @@ config VORTEX
available from <http://www.tldp.org/docs.html#howto>. More
specific information is in
<file:Documentation/networking/vortex.txt> and in the comments at
- the beginning of <file:drivers/net/3c59x.c>.
+ the beginning of <file:drivers/net/ethernet/3com/3c59x.c>.
To compile this support as a module, choose M here.
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 6d6bc754b1a8..1234a14b2b73 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -966,18 +966,6 @@ typhoon_get_stats(struct net_device *dev)
return stats;
}
-static int
-typhoon_set_mac_address(struct net_device *dev, void *addr)
-{
- struct sockaddr *saddr = (struct sockaddr *) addr;
-
- if(netif_running(dev))
- return -EBUSY;
-
- memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
- return 0;
-}
-
static void
typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
@@ -1607,7 +1595,7 @@ typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
le32_to_cpu(indexes->rxBuffCleared))
return -ENOMEM;
- skb = dev_alloc_skb(PKT_BUF_SZ);
+ skb = netdev_alloc_skb(tp->dev, PKT_BUF_SZ);
if(!skb)
return -ENOMEM;
@@ -1618,7 +1606,6 @@ typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
skb_reserve(skb, 2);
#endif
- skb->dev = tp->dev;
dma_addr = pci_map_single(tp->pdev, skb->data,
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
@@ -1673,7 +1660,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
pkt_len = le16_to_cpu(rx->frameLen);
if(pkt_len < rx_copybreak &&
- (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ (new_skb = netdev_alloc_skb(tp->dev, pkt_len + 2)) != NULL) {
skb_reserve(new_skb, 2);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
PKT_BUF_SZ,
@@ -2267,7 +2254,7 @@ static const struct net_device_ops typhoon_netdev_ops = {
.ndo_tx_timeout = typhoon_tx_timeout,
.ndo_get_stats = typhoon_get_stats,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = typhoon_set_mac_address,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
};
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 0f92e3567f68..c30adcc9828a 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -1,4 +1,4 @@
-/* drivers/net/ax88796.c
+/* drivers/net/ethernet/8390/ax88796.c
*
* Copyright 2005,2007 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index bba51cdc74a1..c5bd8eb7a9f5 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -192,7 +192,7 @@ static int get_prom(struct pcmcia_device *link)
unsigned int ioaddr = dev->base_addr;
int i, j;
- /* This is based on drivers/net/ne.c */
+ /* This is based on drivers/net/ethernet/8390/ne.c */
struct {
u_char value, offset;
} program_seq[] = {
@@ -1408,7 +1408,7 @@ static void ei_receive(struct net_device *dev)
{
struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_len+2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL)
{
if (ei_debug > 1)
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index 05ae21435bfd..e77f624e8194 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -717,7 +717,7 @@ static void ei_receive(struct net_device *dev)
} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_len+2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
if (ei_debug > 1)
netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n",
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 053b2551a72d..f2a4e5de18c4 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -326,7 +326,7 @@ static hw_info_t *get_prom(struct pcmcia_device *link)
u_char prom[32];
int i, j;
- /* This is lifted straight from drivers/net/ne.c */
+ /* This is lifted straight from drivers/net/ethernet/8390/ne.c */
struct {
u_char value, offset;
} program_seq[] = {
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 3474a61d4705..c63a64cb6085 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -126,6 +126,7 @@ config NET_NETX
source "drivers/net/ethernet/nuvoton/Kconfig"
source "drivers/net/ethernet/nvidia/Kconfig"
+source "drivers/net/ethernet/nxp/Kconfig"
source "drivers/net/ethernet/octeon/Kconfig"
source "drivers/net/ethernet/oki-semi/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 08d5f0388877..9676a5109d94 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
obj-$(CONFIG_NET_NETX) += netx-eth.o
obj-$(CONFIG_NET_VENDOR_NUVOTON) += nuvoton/
obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
+obj-$(CONFIG_LPC_ENET) += nxp/
obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/
obj-$(CONFIG_ETHOC) += ethoc.o
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index cb4f38a17f20..d896816512ca 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -686,10 +686,9 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
}
dev = alloc_etherdev(sizeof(*np));
- if (!dev) {
- printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
+ if (!dev)
return -ENOMEM;
- }
+
SET_NETDEV_DEV(dev, &pdev->dev);
irq = pdev->irq;
@@ -1180,12 +1179,11 @@ static void init_ring(struct net_device *dev)
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
np->rx_info[i].skb = skb;
if (skb == NULL)
break;
np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
- skb->dev = dev; /* Mark as being used by this device. */
/* Grrr, we cannot offset to correctly align the IP header. */
np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
}
@@ -1473,7 +1471,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
if (pkt_len < rx_copybreak &&
- (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
pci_dma_sync_single_for_cpu(np->pci_dev,
np->rx_info[entry].mapping,
@@ -1597,13 +1595,12 @@ static void refill_rx_ring(struct net_device *dev)
for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
entry = np->dirty_rx % RX_RING_SIZE;
if (np->rx_info[entry].skb == NULL) {
- skb = dev_alloc_skb(np->rx_buf_sz);
+ skb = netdev_alloc_skb(dev, np->rx_buf_sz);
np->rx_info[entry].skb = skb;
if (skb == NULL)
break; /* Better luck next round. */
np->rx_info[entry].mapping =
pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
- skb->dev = dev; /* Mark as being used by this device. */
np->rx_ring[entry].rxaddr =
cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
}
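The starfire hunks above show the conversion applied throughout this merge: dev_alloc_skb(len) becomes netdev_alloc_skb(dev, len), which allocates the buffer and associates it with the device, so the explicit "skb->dev = dev" assignments become redundant and are dropped. A minimal sketch of the resulting RX refill step, assuming a hypothetical ring structure (example_ring, example_refill_one and their fields are illustrative, not taken from any driver in this diff):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>

    struct example_ring {                          /* hypothetical ring state */
            unsigned int buf_sz;
            struct sk_buff **skb;
            dma_addr_t *dma;
    };

    static int example_refill_one(struct net_device *dev,
                                  struct example_ring *ring, unsigned int i)
    {
            struct sk_buff *skb;

            /* netdev_alloc_skb() sets skb->dev for us */
            skb = netdev_alloc_skb(dev, ring->buf_sz + NET_IP_ALIGN);
            if (!skb)
                    return -ENOMEM;                /* try again on the next refill pass */

            skb_reserve(skb, NET_IP_ALIGN);        /* keep the IP header aligned */
            ring->skb[i] = skb;
            ring->dma[i] = dma_map_single(dev->dev.parent, skb->data,
                                          ring->buf_sz, DMA_FROM_DEVICE);
            return dma_mapping_error(dev->dev.parent, ring->dma[i]) ? -EIO : 0;
    }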
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index d812a103e032..ab4daeccdf98 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -113,7 +113,7 @@ static void desc_list_free(void)
}
}
-static int desc_list_init(void)
+static int desc_list_init(struct net_device *dev)
{
int i;
struct sk_buff *new_skb;
@@ -187,7 +187,7 @@ static int desc_list_init(void)
struct dma_descriptor *b = &(r->desc_b);
/* allocate a new skb for next time receive */
- new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
+ new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
pr_notice("init: low on mem - packet dropped\n");
goto init_error;
@@ -621,6 +621,7 @@ static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
setup_mac_addr(dev->dev_addr);
return 0;
}
@@ -1090,7 +1091,7 @@ static void bfin_mac_rx(struct net_device *dev)
/* allocate a new skb for next time receive */
skb = current_rx_ptr->skb;
- new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
+ new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
netdev_notice(dev, "rx: low on mem - packet dropped\n");
dev->stats.rx_dropped++;
@@ -1397,7 +1398,7 @@ static int bfin_mac_open(struct net_device *dev)
}
/* initial rx and tx list */
- ret = desc_list_init();
+ ret = desc_list_init(dev);
if (ret)
return ret;
@@ -1467,10 +1468,8 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
int rc;
ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
- if (!ndev) {
- dev_err(&pdev->dev, "Cannot allocate net device!\n");
+ if (!ndev)
return -ENOMEM;
- }
SET_NETDEV_DEV(ndev, &pdev->dev);
platform_set_drvdata(pdev, ndev);
@@ -1496,12 +1495,14 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
* Grab the MAC from the board somehow
* this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c
*/
- if (!is_valid_ether_addr(ndev->dev_addr))
- bfin_get_ether_addr(ndev->dev_addr);
-
- /* If still not valid, get a random one */
- if (!is_valid_ether_addr(ndev->dev_addr))
- random_ether_addr(ndev->dev_addr);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ if (bfin_get_ether_addr(ndev->dev_addr) ||
+ !is_valid_ether_addr(ndev->dev_addr)) {
+ /* Still not valid, get a random one */
+ netdev_warn(ndev, "Setting Ethernet MAC to a random one\n");
+ eth_hw_addr_random(ndev);
+ }
+ }
setup_mac_addr(ndev->dev_addr);
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index f8559ac9a403..960905c08223 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -101,6 +101,6 @@ struct bfin_mac_local {
#endif
};
-extern void bfin_get_ether_addr(char *addr);
+extern int bfin_get_ether_addr(char *addr);
#endif
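The bfin_mac change is one instance of another recurring cleanup: when neither firmware nor platform data supplies a valid MAC address, drivers now call eth_hw_addr_random(), which writes a random address into dev_addr and records NET_ADDR_RANDOM in addr_assign_type, instead of open-coding random_ether_addr(). A hedged sketch of the probe-time decision (example_init_mac and platform_mac are illustrative names, not from this diff):

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/string.h>

    static void example_init_mac(struct net_device *ndev, const u8 *platform_mac)
    {
            if (platform_mac && is_valid_ether_addr(platform_mac)) {
                    memcpy(ndev->dev_addr, platform_mac, ETH_ALEN);
            } else {
                    /* No valid address available: use a random one and let
                     * user space see NET_ADDR_RANDOM in addr_assign_type.
                     */
                    netdev_warn(ndev, "using a random Ethernet MAC\n");
                    eth_hw_addr_random(ndev);
            }
    }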
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index c885aa905dec..348501178089 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -785,7 +785,6 @@ static int greth_rx(struct net_device *dev, int limit)
} else {
skb_reserve(skb, NET_IP_ALIGN);
- skb->dev = dev;
dma_sync_single_for_cpu(greth->dev,
dma_addr,
@@ -1018,7 +1017,7 @@ static int greth_set_mac_add(struct net_device *dev, void *p)
regs = (struct greth_regs *) greth->regs;
if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
@@ -1422,7 +1421,7 @@ static int __devinit greth_of_probe(struct platform_device *ofdev)
SET_NETDEV_DEV(dev, greth->dev);
if (netif_msg_probe(greth))
- dev_dbg(greth->dev, "reseting controller.\n");
+ dev_dbg(greth->dev, "resetting controller.\n");
/* Reset the controller. */
GRETH_REGSAVE(regs->control, GRETH_RESET);
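greth's set_mac_address callback now reports an invalid address as -EADDRNOTAVAIL rather than -EINVAL, the errno most ndo_set_mac_address implementations use; bnx2 receives the same change later in this diff. Several drivers here also clear NET_ADDR_RANDOM once a user-supplied address replaces a random one. A minimal sketch of the conventional handler (example_set_mac_address is an illustrative name; the hardware write is left as a comment):

    static int example_set_mac_address(struct net_device *dev, void *p)
    {
            struct sockaddr *addr = p;

            if (!is_valid_ether_addr(addr->sa_data))
                    return -EADDRNOTAVAIL;  /* zero or multicast addresses are refused */

            memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
            dev->addr_assign_type &= ~NET_ADDR_RANDOM;

            /* ... program the address into the MAC's filter registers ... */
            return 0;
    }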
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index f872748ab4e6..6c3b1c0adaa0 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -463,11 +463,8 @@ static int __devinit acenic_probe_one(struct pci_dev *pdev,
static int boards_found;
dev = alloc_etherdev(sizeof(struct ace_private));
- if (dev == NULL) {
- printk(KERN_ERR "acenic: Unable to allocate "
- "net_device structure!\n");
+ if (dev == NULL)
return -ENOMEM;
- }
SET_NETDEV_DEV(dev, &pdev->dev);
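The acenic hunk is one of many in this merge (starfire, bfin_mac, amd8111e, au1000, declance, bmac, mace, b44, bnx2, atl1c, atl1e, ...) that delete per-driver "allocation failed" printks after alloc_etherdev() or kmalloc(): the memory allocator already logs a warning with a backtrace when an allocation without __GFP_NOWARN fails, so the extra message adds no information. The resulting probe pattern is simply (priv standing in for the driver's private struct):

        dev = alloc_etherdev(sizeof(*priv));
        if (!dev)
                return -ENOMEM;         /* core code has already logged the failure */

        SET_NETDEV_DEV(dev, &pdev->dev);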
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 60b35fb5f524..1b046f58d58f 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -316,7 +316,7 @@ static int lance_rx (struct net_device *dev)
if (bits & LE_R1_EOP) dev->stats.rx_errors++;
} else {
int len = (rd->mblength & 0xfff) - 4;
- struct sk_buff *skb = dev_alloc_skb (len+2);
+ struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
if (!skb) {
printk ("%s: Memory squeeze, deferring packet.\n",
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 238b537b68fe..8350f4b37a8a 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -113,7 +113,7 @@ config DEPCA
If you have a network (Ethernet) card of this type, say Y and read
the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto> as well as
- <file:drivers/net/depca.c>.
+ <file:drivers/net/ethernet/amd/depca.c>.
To compile this driver as a module, choose M here. The module
will be called depca.
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 825e5d4ef4c3..689dfcafc6d4 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -290,7 +290,7 @@ static int lance_rx(struct net_device *dev)
dev->stats.rx_errors++;
} else {
int len = (rd->mblength & 0xfff) - 4;
- struct sk_buff *skb = dev_alloc_skb(len + 2);
+ struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
if (!skb) {
netdev_warn(dev, "Memory squeeze, deferring packet\n");
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 7d5ded80d2d7..cc7b9e46780c 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/net/am79c961.c
+ * linux/drivers/net/ethernet/amd/am79c961a.c
*
* by Russell King <rmk@arm.linux.org.uk> 1995-2001.
*
@@ -516,7 +516,7 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
}
len = am_readword(dev, hdraddr + 6);
- skb = dev_alloc_skb(len + 2);
+ skb = netdev_alloc_skb(dev, len + 2);
if (skb) {
skb_reserve(skb, 2);
diff --git a/drivers/net/ethernet/amd/am79c961a.h b/drivers/net/ethernet/amd/am79c961a.h
index fd634d32756b..9f384b79507b 100644
--- a/drivers/net/ethernet/amd/am79c961a.h
+++ b/drivers/net/ethernet/amd/am79c961a.h
@@ -1,5 +1,5 @@
/*
- * linux/drivers/net/arm/am79c961a.h
+ * linux/drivers/net/ethernet/amd/am79c961a.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 33e0a8c20f6b..9f62504d0086 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -336,7 +336,8 @@ static int amd8111e_init_ring(struct net_device *dev)
/* Allocating receive skbs */
for (i = 0; i < NUM_RX_BUFFERS; i++) {
- if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) {
+ lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len);
+ if (!lp->rx_skbuff[i]) {
/* Release previos allocated skbs */
for(--i; i >= 0 ;i--)
dev_kfree_skb(lp->rx_skbuff[i]);
@@ -768,7 +769,8 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
}
if(--rx_pkt_limit < 0)
goto rx_not_empty;
- if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
+ new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
+ if (!new_skb) {
/* if allocation fail,
ignore that pkt and go to next one */
lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
@@ -1859,7 +1861,6 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(struct amd8111e_priv));
if (!dev) {
- printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n");
err = -ENOMEM;
goto err_free_reg;
}
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index eb18e1fe65c8..f4c228e4d76c 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -191,7 +191,7 @@ static int ariadne_rx(struct net_device *dev)
short pkt_len = swapw(priv->rx_ring[entry]->RMD3);
struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_len + 2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
netdev_warn(dev, "Memory squeeze, deferring packet\n");
for (i = 0; i < RX_RING_SIZE; i++)
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 15bfa28d6c53..70ed79c46245 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -997,7 +997,7 @@ static int lance_rx( struct net_device *dev )
dev->stats.rx_errors++;
}
else {
- skb = dev_alloc_skb( pkt_len+2 );
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
dev->name ));
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 8b95dd314253..397596b078d9 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -725,7 +725,7 @@ static int au1000_rx(struct net_device *dev)
/* good frame */
frmlen = (status & RX_FRAME_LEN_MASK);
frmlen -= 4; /* Remove FCS */
- skb = dev_alloc_skb(frmlen + 2);
+ skb = netdev_alloc_skb(dev, frmlen + 2);
if (skb == NULL) {
netdev_err(dev, "Memory squeeze, dropping packet.\n");
dev->stats.rx_dropped++;
@@ -1077,7 +1077,6 @@ static int __devinit au1000_probe(struct platform_device *pdev)
dev = alloc_etherdev(sizeof(struct au1000_private));
if (!dev) {
- dev_err(&pdev->dev, "alloc_etherdev failed\n");
err = -ENOMEM;
goto err_alloc;
}
@@ -1130,9 +1129,6 @@ static int __devinit au1000_probe(struct platform_device *pdev)
au1000_setup_hw_rings(aup, aup->macdma);
- /* set a random MAC now in case platform_data doesn't provide one */
- random_ether_addr(dev->dev_addr);
-
writel(0, aup->enable);
aup->mac_enabled = 0;
@@ -1142,8 +1138,12 @@ static int __devinit au1000_probe(struct platform_device *pdev)
" PHY search on MAC0\n");
aup->phy1_search_mac0 = 1;
} else {
- if (is_valid_ether_addr(pd->mac))
+ if (is_valid_ether_addr(pd->mac)) {
memcpy(dev->dev_addr, pd->mac, 6);
+ } else {
+ /* Set a random MAC since no valid provided by platform_data. */
+ eth_hw_addr_random(dev);
+ }
aup->phy_static_config = pd->phy_static_config;
aup->phy_search_highest_addr = pd->phy_search_highest_addr;
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 73f8d4fa682d..7dc508e5c72e 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -605,7 +605,7 @@ static int lance_rx(struct net_device *dev)
dev->stats.rx_errors++;
} else {
len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
- skb = dev_alloc_skb(len + 2);
+ skb = netdev_alloc_skb(dev, len + 2);
if (skb == 0) {
printk("%s: Memory squeeze, deferring packet.\n",
@@ -1052,8 +1052,6 @@ static int __devinit dec_lance_probe(struct device *bdev, const int type)
dev = alloc_etherdev(sizeof(struct lance_private));
if (!dev) {
- printk(KERN_ERR "%s: Unable to allocate etherdev, aborting.\n",
- name);
ret = -ENOMEM;
goto err_out;
}
diff --git a/drivers/net/ethernet/amd/depca.c b/drivers/net/ethernet/amd/depca.c
index 681970c07f22..86dd95766a64 100644
--- a/drivers/net/ethernet/amd/depca.c
+++ b/drivers/net/ethernet/amd/depca.c
@@ -1042,7 +1042,7 @@ static int depca_rx(struct net_device *dev)
short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4;
struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_len + 2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb != NULL) {
unsigned char *buf;
skb_reserve(skb, 2); /* 16 byte align the IP header */
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 86aa0d546a5b..4e2d68a4de8a 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -89,7 +89,6 @@ static int __devinit hplance_init_one(struct dio_dev *d,
{
struct net_device *dev;
int err = -ENOMEM;
- int i;
dev = alloc_etherdev(sizeof(struct hplance_private));
if (!dev)
@@ -107,13 +106,8 @@ static int __devinit hplance_init_one(struct dio_dev *d,
dio_set_drvdata(d, dev);
- printk(KERN_INFO "%s: %s; select code %d, addr %2.2x", dev->name, d->name, d->scode, dev->dev_addr[0]);
-
- for (i=1; i<6; i++) {
- printk(":%2.2x", dev->dev_addr[i]);
- }
-
- printk(", irq %d\n", d->ipl);
+ printk(KERN_INFO "%s: %s; select code %d, addr %pM, irq %d\n",
+ dev->name, d->name, d->scode, dev->dev_addr, d->ipl);
return 0;
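The hplance hunk replaces a byte-by-byte printk loop with the %pM format extension, which prints a 6-byte buffer as a colon-separated MAC address. For example (values made up):

            u8 mac[ETH_ALEN] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };

            pr_info("example: MAC %pM\n", mac);     /* "example: MAC 00:1a:2b:3c:4d:5e" */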
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 6e6aa7213aab..013b65108536 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -621,10 +621,8 @@ static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
}
else {
ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
- if(!ret) {
- printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
+ if(!ret)
return NULL;
- }
}
if( (u32) virt_to_phys(ptr+size) > 0x1000000) {
printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
@@ -1091,7 +1089,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
if (skb)
skb_reserve(skb,16);
#else
- struct sk_buff *skb = dev_alloc_skb(len+2);
+ struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
#endif
if(skb)
{
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 6be0dd67631a..ebdb9e238a8d 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -1104,7 +1104,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
pr_debug(" receiving packet size 0x%X rx_status"
" 0x%X.\n", pkt_len, rx_status);
- skb = dev_alloc_skb(pkt_len+2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb != NULL) {
skb_reserve(skb, 2);
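The skb_reserve(skb, 2) that follows most of these netdev_alloc_skb(dev, pkt_len + 2) calls pushes the data pointer past 2 bytes of padding, so the 14-byte Ethernet header leaves the IP header on a 4-byte boundary; that is also why the allocations request pkt_len + 2 (NET_IP_ALIGN expresses the same constant where it is non-zero). A sketch of the copy-style receive path many of these older NICs use, assuming rx_buf points at the received frame in the device's buffer:

            skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
            if (skb) {
                    skb_reserve(skb, NET_IP_ALIGN);         /* 14 + 2: IP header aligned */
                    memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
                    skb->protocol = eth_type_trans(skb, dev);
                    netif_rx(skb);
                    dev->stats.rx_packets++;
                    dev->stats.rx_bytes += pkt_len;
            }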
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 20e6dab0186c..86b6d8e4e6cd 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -588,11 +588,11 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
/* now allocate any new buffers needed */
for (; new < size; new++) {
struct sk_buff *rx_skbuff;
- new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
+ new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
rx_skbuff = new_skb_list[new];
if (!rx_skbuff) {
/* keep the original lists and buffers */
- netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
+ netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
__func__);
goto free_all_new;
}
@@ -909,7 +909,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
/* Initialize Transmit buffers. */
size = data_len + 15;
for (x = 0; x < numbuffs; x++) {
- skb = dev_alloc_skb(size);
+ skb = netdev_alloc_skb(dev, size);
if (!skb) {
netif_printk(lp, hw, KERN_DEBUG, dev,
"Cannot allocate skb at line: %d!\n",
@@ -1152,7 +1152,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
if (pkt_len > rx_copybreak) {
struct sk_buff *newskb;
- newskb = dev_alloc_skb(PKT_BUF_SKB);
+ newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
if (newskb) {
skb_reserve(newskb, NET_IP_ALIGN);
skb = lp->rx_skbuff[entry];
@@ -1172,7 +1172,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
} else
skb = NULL;
} else
- skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
+ skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
if (skb == NULL) {
netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
@@ -1649,8 +1649,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
dev = alloc_etherdev(sizeof(*lp));
if (!dev) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- pr_err("Memory allocation failed\n");
ret = -ENOMEM;
goto err_release_region;
}
@@ -2273,11 +2271,11 @@ static int pcnet32_init_ring(struct net_device *dev)
for (i = 0; i < lp->rx_ring_size; i++) {
struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
if (rx_skbuff == NULL) {
- lp->rx_skbuff[i] = dev_alloc_skb(PKT_BUF_SKB);
+ lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB);
rx_skbuff = lp->rx_skbuff[i];
if (!rx_skbuff) {
/* there is not much we can do at this point */
- netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
+ netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
__func__);
return -1;
}
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 080b71fcc683..74b3891b6483 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -810,7 +810,7 @@ static int lance_rx( struct net_device *dev )
dev->stats.rx_errors++;
}
else {
- skb = dev_alloc_skb( pkt_len+2 );
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
dev->name ));
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 7ea16d32a5f5..e3fe3504e198 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -534,7 +534,7 @@ static void lance_rx_dvma(struct net_device *dev)
if (bits & LE_R1_EOP) dev->stats.rx_errors++;
} else {
len = (rd->mblength & 0xfff) - 4;
- skb = dev_alloc_skb(len + 2);
+ skb = netdev_alloc_skb(dev, len + 2);
if (skb == NULL) {
printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
@@ -706,7 +706,7 @@ static void lance_rx_pio(struct net_device *dev)
if (bits & LE_R1_EOP) dev->stats.rx_errors++;
} else {
len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
- skb = dev_alloc_skb(len + 2);
+ skb = netdev_alloc_skb(dev, len + 2);
if (skb == NULL) {
printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index d070b229dbf7..855bdafb1a87 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -607,8 +607,9 @@ bmac_init_tx_ring(struct bmac_data *bp)
}
static int
-bmac_init_rx_ring(struct bmac_data *bp)
+bmac_init_rx_ring(struct net_device *dev)
{
+ struct bmac_data *bp = netdev_priv(dev);
volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
int i;
struct sk_buff *skb;
@@ -618,7 +619,7 @@ bmac_init_rx_ring(struct bmac_data *bp)
(N_RX_RING + 1) * sizeof(struct dbdma_cmd));
for (i = 0; i < N_RX_RING; i++) {
if ((skb = bp->rx_bufs[i]) == NULL) {
- bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
+ bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
if (skb != NULL)
skb_reserve(skb, 2);
}
@@ -722,7 +723,7 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
++dev->stats.rx_dropped;
}
if ((skb = bp->rx_bufs[i]) == NULL) {
- bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
+ bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
if (skb != NULL)
skb_reserve(bp->rx_bufs[i], 2);
}
@@ -1208,7 +1209,7 @@ static void bmac_reset_and_enable(struct net_device *dev)
spin_lock_irqsave(&bp->lock, flags);
bmac_enable_and_reset_chip(dev);
bmac_init_tx_ring(bp);
- bmac_init_rx_ring(bp);
+ bmac_init_rx_ring(dev);
bmac_init_chip(dev);
bmac_start_chip(dev);
bmwrite(dev, INTDISABLE, EnableNormal);
@@ -1218,7 +1219,7 @@ static void bmac_reset_and_enable(struct net_device *dev)
* It seems that the bmac can't receive until it's transmitted
* a packet. So we give it a dummy packet to transmit.
*/
- skb = dev_alloc_skb(ETHERMINPACKET);
+ skb = netdev_alloc_skb(dev, ETHERMINPACKET);
if (skb != NULL) {
data = skb_put(skb, ETHERMINPACKET);
memset(data, 0, ETHERMINPACKET);
@@ -1269,10 +1270,8 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i
memcpy(addr, prop_addr, sizeof(addr));
dev = alloc_etherdev(PRIV_BYTES);
- if (!dev) {
- printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
bp = netdev_priv(dev);
SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
@@ -1660,10 +1659,8 @@ static int __init bmac_init(void)
{
if (bmac_emergency_rxbuf == NULL) {
bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
- if (bmac_emergency_rxbuf == NULL) {
- printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
+ if (bmac_emergency_rxbuf == NULL)
return -ENOMEM;
- }
}
return macio_register_driver(&bmac_driver);
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index bec87bd9195c..e1df4b76c885 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -136,10 +136,8 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
*/
if (dummy_buf == NULL) {
dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL);
- if (dummy_buf == NULL) {
- printk(KERN_ERR "MACE: couldn't allocate dummy buffer\n");
+ if (dummy_buf == NULL)
return -ENOMEM;
- }
}
if (macio_request_resources(mdev, "mace")) {
@@ -149,7 +147,6 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
dev = alloc_etherdev(PRIV_BYTES);
if (!dev) {
- printk(KERN_ERR "MACE: can't allocate ethernet device !\n");
rc = -ENOMEM;
goto err_release;
}
@@ -447,7 +444,7 @@ static int mace_open(struct net_device *dev)
memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
cp = mp->rx_cmds;
for (i = 0; i < N_RX_RING - 1; ++i) {
- skb = dev_alloc_skb(RX_BUFLEN + 2);
+ skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
if (!skb) {
data = dummy_buf;
} else {
@@ -959,7 +956,7 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
cp = mp->rx_cmds + i;
skb = mp->rx_bufs[i];
if (!skb) {
- skb = dev_alloc_skb(RX_BUFLEN + 2);
+ skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
if (skb) {
skb_reserve(skb, 2);
mp->rx_bufs[i] = skb;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 7cf81bbffe0e..ab7ff8645ab1 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -661,7 +661,7 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
} else {
unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8 );
- skb = dev_alloc_skb(frame_length + 2);
+ skb = netdev_alloc_skb(dev, frame_length + 2);
if (!skb) {
dev->stats.rx_dropped++;
return;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index 23f2ab0f2fa8..bd1667cbffa6 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -224,7 +224,7 @@ int atl1c_read_mac_addr(struct atl1c_hw *hw)
random_ether_addr(hw->perm_mac_addr);
memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr));
- return 0;
+ return err;
}
/*
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 47a9bb2c813c..1ef0c9275dee 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -468,6 +468,7 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
+ netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
atl1c_hw_set_mac_addr(&adapter->hw);
@@ -1765,7 +1766,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid
while (next_info->flags & ATL1C_BUFFER_FREE) {
rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
- skb = dev_alloc_skb(adapter->rx_buffer_len);
+ skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len);
if (unlikely(!skb)) {
if (netif_msg_rx_err(adapter))
dev_warn(&pdev->dev, "alloc rx buffer failed\n");
@@ -2685,7 +2686,6 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
netdev = alloc_etherdev(sizeof(struct atl1c_adapter));
if (netdev == NULL) {
err = -ENOMEM;
- dev_err(&pdev->dev, "etherdev alloc failed\n");
goto err_alloc_etherdev;
}
@@ -2742,10 +2742,9 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
err = -EIO;
goto err_reset;
}
- if (atl1c_read_mac_addr(&adapter->hw) != 0) {
- err = -EIO;
- dev_err(&pdev->dev, "get mac address failed\n");
- goto err_eeprom;
+ if (atl1c_read_mac_addr(&adapter->hw)) {
+ /* got a random MAC address, set NET_ADDR_RANDOM to netdev */
+ netdev->addr_assign_type |= NET_ADDR_RANDOM;
}
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
@@ -2770,7 +2769,6 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
err_reset:
err_register:
err_sw_init:
-err_eeprom:
iounmap(adapter->hw.hw_addr);
err_init_netdev:
err_ioremap:
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index c915c0873810..93ff2b231284 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2300,7 +2300,6 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
netdev = alloc_etherdev(sizeof(struct atl1e_adapter));
if (netdev == NULL) {
err = -ENOMEM;
- dev_err(&pdev->dev, "etherdev alloc failed\n");
goto err_alloc_etherdev;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 9bd204976648..40ac41436549 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -534,14 +534,17 @@ static int atl1_get_permanent_address(struct atl1_hw *hw)
*/
static s32 atl1_read_mac_addr(struct atl1_hw *hw)
{
+ s32 ret = 0;
u16 i;
- if (atl1_get_permanent_address(hw))
+ if (atl1_get_permanent_address(hw)) {
random_ether_addr(hw->perm_mac_addr);
+ ret = 1;
+ }
for (i = 0; i < ETH_ALEN; i++)
hw->mac_addr[i] = hw->perm_mac_addr[i];
- return 0;
+ return ret;
}
/*
@@ -3007,7 +3010,10 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
}
/* copy the MAC address out of the EEPROM */
- atl1_read_mac_addr(&adapter->hw);
+ if (atl1_read_mac_addr(&adapter->hw)) {
+ /* mark random mac */
+ netdev->addr_assign_type |= NET_ADDR_RANDOM;
+ }
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->dev_addr)) {
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 071f4c858969..6762dc406b25 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -2258,7 +2258,7 @@ static int get_permanent_address(struct atl2_hw *hw)
u32 Addr[2];
u32 i, Control;
u16 Register;
- u8 EthAddr[NODE_ADDRESS_SIZE];
+ u8 EthAddr[ETH_ALEN];
bool KeyValid;
if (is_valid_ether_addr(hw->perm_mac_addr))
@@ -2299,7 +2299,7 @@ static int get_permanent_address(struct atl2_hw *hw)
*(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);
if (is_valid_ether_addr(EthAddr)) {
- memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
+ memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN);
return 0;
}
return 1;
@@ -2334,7 +2334,7 @@ static int get_permanent_address(struct atl2_hw *hw)
*(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
*(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *)&Addr[1]);
if (is_valid_ether_addr(EthAddr)) {
- memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
+ memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN);
return 0;
}
/* maybe MAC-address is from BIOS */
@@ -2344,7 +2344,7 @@ static int get_permanent_address(struct atl2_hw *hw)
*(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);
if (is_valid_ether_addr(EthAddr)) {
- memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
+ memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN);
return 0;
}
@@ -2358,8 +2358,6 @@ static int get_permanent_address(struct atl2_hw *hw)
*/
static s32 atl2_read_mac_addr(struct atl2_hw *hw)
{
- u16 i;
-
if (get_permanent_address(hw)) {
/* for test */
/* FIXME: shouldn't we use random_ether_addr() here? */
@@ -2371,8 +2369,7 @@ static s32 atl2_read_mac_addr(struct atl2_hw *hw)
hw->perm_mac_addr[5] = 0x38;
}
- for (i = 0; i < NODE_ADDRESS_SIZE; i++)
- hw->mac_addr[i] = hw->perm_mac_addr[i];
+ memcpy(hw->mac_addr, hw->perm_mac_addr, ETH_ALEN);
return 0;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.h b/drivers/net/ethernet/atheros/atlx/atl2.h
index bf9016ebdd9b..3ebe19f7242b 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.h
+++ b/drivers/net/ethernet/atheros/atlx/atl2.h
@@ -47,7 +47,6 @@ extern int ethtool_ioctl(struct ifreq *ifr);
#define PCI_COMMAND_REGISTER PCI_COMMAND
#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
-#define ETH_ADDR_LEN ETH_ALEN
#define ATL2_WRITE_REG(a, reg, value) (iowrite32((value), \
((a)->hw_addr + (reg))))
@@ -429,8 +428,8 @@ struct atl2_hw {
u8 flash_vendor;
u8 dma_fairness;
- u8 mac_addr[NODE_ADDRESS_SIZE];
- u8 perm_mac_addr[NODE_ADDRESS_SIZE];
+ u8 mac_addr[ETH_ALEN];
+ u8 perm_mac_addr[ETH_ALEN];
/* FIXME */
/* bool phy_preamble_sup; */
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 8ff7411094d5..3cd8837236dc 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -84,6 +84,7 @@ static int atlx_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
+ netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
atlx_set_mac_addr(&adapter->hw);
return 0;
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.h b/drivers/net/ethernet/atheros/atlx/atlx.h
index 14054b75aa62..448f5dcc02e6 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.h
+++ b/drivers/net/ethernet/atheros/atlx/atlx.h
@@ -484,7 +484,6 @@
/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA */
#define EEPROM_SUM 0xBABA
-#define NODE_ADDRESS_SIZE 6
struct atlx_spi_flash_dev {
const char *manu_name; /* manufacturer id */
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index cab87456a34a..46b8b7d81633 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2138,7 +2138,6 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
dev = alloc_etherdev(sizeof(*bp));
if (!dev) {
- dev_err(sdev->dev, "Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto out;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 021fb818007a..8297e2868736 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2625,10 +2625,8 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
u32 val;
good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
- if (good_mbuf == NULL) {
- pr_err("Failed to allocate memory in %s\n", __func__);
+ if (good_mbuf == NULL)
return -ENOMEM;
- }
REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
@@ -6248,7 +6246,16 @@ static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
int cpus = num_online_cpus();
- int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
+ int msix_vecs;
+
+ if (!bp->num_req_rx_rings)
+ msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
+ else if (!bp->num_req_tx_rings)
+ msix_vecs = max(cpus, bp->num_req_rx_rings);
+ else
+ msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
+
+ msix_vecs = min(msix_vecs, RX_MAX_RINGS);
bp->irq_tbl[0].handler = bnx2_interrupt;
strcpy(bp->irq_tbl[0].name, bp->dev->name);
@@ -6272,10 +6279,18 @@ bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
}
}
- bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
+ if (!bp->num_req_tx_rings)
+ bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
+ else
+ bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
+
+ if (!bp->num_req_rx_rings)
+ bp->num_rx_rings = bp->irq_nvecs;
+ else
+ bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
+
netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
- bp->num_rx_rings = bp->irq_nvecs;
return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
@@ -6550,6 +6565,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
+ /* Sync BD data before updating TX mailbox */
+ wmb();
+
netdev_tx_sent_queue(txq, skb->len);
prod = NEXT_TX_BD(prod);
@@ -7164,7 +7182,7 @@ bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
}
static int
-bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
+bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
if (netif_running(bp->dev)) {
/* Reset will erase chipset stats; save them */
@@ -7172,7 +7190,12 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
bnx2_netif_stop(bp, true);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
- __bnx2_free_irq(bp);
+ if (reset_irq) {
+ bnx2_free_irq(bp);
+ bnx2_del_napi(bp);
+ } else {
+ __bnx2_free_irq(bp);
+ }
bnx2_free_skbs(bp);
bnx2_free_mem(bp);
}
@@ -7181,9 +7204,16 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
bp->tx_ring_size = tx;
if (netif_running(bp->dev)) {
- int rc;
+ int rc = 0;
+
+ if (reset_irq) {
+ rc = bnx2_setup_int_mode(bp, disable_msi);
+ bnx2_init_napi(bp);
+ }
+
+ if (!rc)
+ rc = bnx2_alloc_mem(bp);
- rc = bnx2_alloc_mem(bp);
if (!rc)
rc = bnx2_request_irq(bp);
@@ -7219,7 +7249,8 @@ bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
return -EINVAL;
}
- rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
+ rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
+ false);
return rc;
}
@@ -7607,6 +7638,54 @@ bnx2_set_features(struct net_device *dev, netdev_features_t features)
return 0;
}
+static void bnx2_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ u32 max_rx_rings = 1;
+ u32 max_tx_rings = 1;
+
+ if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
+ max_rx_rings = RX_MAX_RINGS;
+ max_tx_rings = TX_MAX_RINGS;
+ }
+
+ channels->max_rx = max_rx_rings;
+ channels->max_tx = max_tx_rings;
+ channels->max_other = 0;
+ channels->max_combined = 0;
+ channels->rx_count = bp->num_rx_rings;
+ channels->tx_count = bp->num_tx_rings;
+ channels->other_count = 0;
+ channels->combined_count = 0;
+}
+
+static int bnx2_set_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ u32 max_rx_rings = 1;
+ u32 max_tx_rings = 1;
+ int rc = 0;
+
+ if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
+ max_rx_rings = RX_MAX_RINGS;
+ max_tx_rings = TX_MAX_RINGS;
+ }
+ if (channels->rx_count > max_rx_rings ||
+ channels->tx_count > max_tx_rings)
+ return -EINVAL;
+
+ bp->num_req_rx_rings = channels->rx_count;
+ bp->num_req_tx_rings = channels->tx_count;
+
+ if (netif_running(dev))
+ rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
+ bp->tx_ring_size, true);
+
+ return rc;
+}
+
static const struct ethtool_ops bnx2_ethtool_ops = {
.get_settings = bnx2_get_settings,
.set_settings = bnx2_set_settings,
@@ -7631,6 +7710,8 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
.set_phys_id = bnx2_set_phys_id,
.get_ethtool_stats = bnx2_get_ethtool_stats,
.get_sset_count = bnx2_get_sset_count,
+ .get_channels = bnx2_get_channels,
+ .set_channels = bnx2_set_channels,
};
/* Called with rtnl_lock */
@@ -7692,7 +7773,7 @@ bnx2_change_mac_addr(struct net_device *dev, void *p)
struct bnx2 *bp = netdev_priv(dev);
if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
if (netif_running(dev))
@@ -7712,7 +7793,8 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
dev->mtu = new_mtu;
- return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
+ return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
+ false);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
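Among the bnx2 changes, the new wmb() in bnx2_start_xmit() orders the buffer-descriptor writes in host memory ahead of the producer-index update that lets the NIC fetch them; on a weakly ordered CPU the doorbell could otherwise become visible before the descriptor contents. The general shape of the pattern, with placeholder register and flag names rather than bnx2's own:

            /* 1. fill the descriptor in DMA-visible memory */
            txbd->addr_hi = cpu_to_le32(upper_32_bits(mapping));
            txbd->addr_lo = cpu_to_le32(lower_32_bits(mapping));
            txbd->len_flags = cpu_to_le32(len | EXAMPLE_TXBD_END);

            /* 2. make those stores visible before the doorbell write */
            wmb();

            /* 3. ring the doorbell; the device may DMA the descriptor from here on */
            writel(prod, ioaddr + EXAMPLE_TX_PROD_REG);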
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 1db2d51ba3f1..dc06bda73be7 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6933,6 +6933,9 @@ struct bnx2 {
u8 num_tx_rings;
u8 num_rx_rings;
+ int num_req_tx_rings;
+ int num_req_rx_rings;
+
u32 leds_save;
u32 idle_chk_status_idx;
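The num_req_tx_rings/num_req_rx_rings fields added here carry the counts requested through the new ethtool get_channels/set_channels hooks in bnx2.c, reachable from user space as `ethtool -l ethX` and `ethtool -L ethX rx N tx N`. bnx2_setup_int_mode() then sizes its MSI-X request from the larger of the two requests, or from cpus + 1 when nothing was requested, and clamps the result to RX_MAX_RINGS. As a worked example with 4 online CPUs: no request gives min(4 + 1, RX_MAX_RINGS) vectors, exactly as before; a request of rx 2 tx 8 gives min(max(2, 8), RX_MAX_RINGS), i.e. 8 vectors provided RX_MAX_RINGS is at least 8. The ring counts themselves are then capped by the number of vectors actually granted.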
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 8c73d34b2ff1..e37161f19250 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1,6 +1,6 @@
/* bnx2x.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -23,8 +23,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.70.35-0"
-#define DRV_MODULE_RELDATE "2011/11/10"
+#define DRV_MODULE_VERSION "1.72.10-0"
+#define DRV_MODULE_RELDATE "2012/02/20"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -58,18 +58,22 @@
#define DRV_MODULE_NAME "bnx2x"
/* for messages that are currently off */
-#define BNX2X_MSG_OFF 0
-#define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */
-#define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */
-#define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */
-#define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */
-#define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */
-#define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */
+#define BNX2X_MSG_OFF 0x0
+#define BNX2X_MSG_MCP 0x0010000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_STATS 0x0020000 /* was: NETIF_MSG_TIMER */
+#define BNX2X_MSG_NVM 0x0040000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_DMAE 0x0080000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_SP 0x0100000 /* was: NETIF_MSG_INTR */
+#define BNX2X_MSG_FP 0x0200000 /* was: NETIF_MSG_INTR */
+#define BNX2X_MSG_IOV 0x0800000
+#define BNX2X_MSG_IDLE 0x2000000 /* used for idle check*/
+#define BNX2X_MSG_ETHTOOL 0x4000000
+#define BNX2X_MSG_DCB 0x8000000
/* regular debug print */
#define DP(__mask, fmt, ...) \
do { \
- if (bp->msg_enable & (__mask)) \
+ if (unlikely(bp->msg_enable & (__mask))) \
pr_notice("[%s:%d(%s)]" fmt, \
__func__, __LINE__, \
bp->dev ? (bp->dev->name) : "?", \
@@ -78,14 +82,14 @@ do { \
#define DP_CONT(__mask, fmt, ...) \
do { \
- if (bp->msg_enable & (__mask)) \
+ if (unlikely(bp->msg_enable & (__mask))) \
pr_cont(fmt, ##__VA_ARGS__); \
} while (0)
/* errors debug print */
#define BNX2X_DBG_ERR(fmt, ...) \
do { \
- if (netif_msg_probe(bp)) \
+ if (unlikely(netif_msg_probe(bp))) \
pr_err("[%s:%d(%s)]" fmt, \
__func__, __LINE__, \
bp->dev ? (bp->dev->name) : "?", \
@@ -108,7 +112,7 @@ do { \
/* before we have a dev->name use dev_info() */
#define BNX2X_DEV_INFO(fmt, ...) \
do { \
- if (netif_msg_probe(bp)) \
+ if (unlikely(netif_msg_probe(bp))) \
dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \
} while (0)
@@ -341,6 +345,7 @@ union db_prod {
#define SGE_PAGE_SIZE PAGE_SIZE
#define SGE_PAGE_SHIFT PAGE_SHIFT
#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE)
/* SGE ring related macros */
#define NUM_RX_SGE_PAGES 2
@@ -445,6 +450,8 @@ struct bnx2x_agg_info {
u16 vlan_tag;
u16 len_on_bd;
u32 rxhash;
+ u16 gro_size;
+ u16 full_page;
};
#define Q_STATS_OFFSET32(stat_name) \
@@ -473,6 +480,11 @@ struct bnx2x_fp_txdata {
int txq_index;
};
+enum bnx2x_tpa_mode_t {
+ TPA_MODE_LRO,
+ TPA_MODE_GRO
+};
+
struct bnx2x_fastpath {
struct bnx2x *bp; /* parent */
@@ -489,6 +501,8 @@ struct bnx2x_fastpath {
dma_addr_t status_blk_mapping;
+ enum bnx2x_tpa_mode_t mode;
+
u8 max_cos; /* actual number of active tx coses */
struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS];
@@ -540,6 +554,7 @@ struct bnx2x_fastpath {
struct ustorm_per_queue_stats old_uclient;
struct xstorm_per_queue_stats old_xclient;
struct bnx2x_eth_q_stats eth_q_stats;
+ struct bnx2x_eth_q_stats_old eth_q_stats_old;
/* The size is calculated using the following:
sizeof name field from netdev structure +
@@ -1046,7 +1061,6 @@ struct bnx2x_slowpath {
struct nig_stats nig_stats;
struct host_port_stats port_stats;
struct host_func_stats func_stats;
- struct host_func_stats func_stats_base;
u32 wb_comp;
u32 wb_data[4];
@@ -1088,7 +1102,8 @@ enum bnx2x_recovery_state {
BNX2X_RECOVERY_DONE,
BNX2X_RECOVERY_INIT,
BNX2X_RECOVERY_WAIT,
- BNX2X_RECOVERY_FAILED
+ BNX2X_RECOVERY_FAILED,
+ BNX2X_RECOVERY_NIC_LOADING
};
/*
@@ -1198,6 +1213,9 @@ struct bnx2x {
#define ETH_MIN_PACKET_SIZE 60
#define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
+/* TCP with Timestamp Option (32) + IPv6 (40) */
+#define ETH_MAX_TPA_HEADER_SIZE 72
+#define ETH_MIN_TPA_HEADER_SIZE 40
/* Max supported alignment is 256 (8 shift) */
#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT)
@@ -1268,6 +1286,7 @@ struct bnx2x {
#define NO_MCP_FLAG (1 << 9)
#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
+#define GRO_ENABLE_FLAG (1 << 10)
#define MF_FUNC_DIS (1 << 11)
#define OWN_CNIC_IRQ (1 << 12)
#define NO_ISCSI_OOO_FLAG (1 << 13)
@@ -1316,6 +1335,8 @@ struct bnx2x {
u8 wol;
+ bool gro_check;
+
int rx_ring_size;
u16 tx_quick_cons_trip_int;
@@ -1461,6 +1482,11 @@ struct bnx2x {
u16 stats_counter;
struct bnx2x_eth_stats eth_stats;
+ struct host_func_stats func_stats;
+ struct bnx2x_eth_stats_old eth_stats_old;
+ struct bnx2x_net_stats_old net_stats_old;
+ struct bnx2x_fw_port_stats_old fw_stats_old;
+ bool stats_init;
struct z_stream_s *strm;
void *gunzip_buf;
@@ -2073,8 +2099,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4
-int bnx2x_close(struct net_device *dev);
-
/* Congestion management fairness mode */
#define CMNG_FNS_NONE 0
#define CMNG_FNS_MINMAX 1
@@ -2094,14 +2118,22 @@ void bnx2x_set_ethtool_ops(struct net_device *netdev);
void bnx2x_notify_link_changed(struct bnx2x *bp);
-#define BNX2X_MF_PROTOCOL(bp) \
+#define BNX2X_MF_SD_PROTOCOL(bp) \
((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
#ifdef BCM_CNIC
-#define BNX2X_IS_MF_PROTOCOL_ISCSI(bp) \
- (BNX2X_MF_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
+#define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \
+ (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
+
+#define BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) \
+ (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_FCOE)
+
+#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp))
+#define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))
-#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_PROTOCOL_ISCSI(bp))
+#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
+ (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
+ BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
#endif
#endif /* bnx2x.h */
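In the new TPA_MODE_GRO path declared here, each SGE buffer (SGE_PAGES = SGE_PAGE_SIZE * PAGES_PER_SGE bytes) is carved into gro_size-byte fragments, and full_page is the SGE size rounded down to a whole number of such fragments; the FW 7.2.16 workaround in bnx2x_cmn.c additionally drops one gro_size when the SGE size is an exact multiple and bp->gro_check flags the MTU as affected. As a worked example, assuming for illustration SGE_PAGES = 4096: with gro_size = 1448, full_page = 4096 / 1448 * 1448 = 2 * 1448 = 2896; with gro_size = 2048, 4096 is an exact multiple, so the workaround reduces full_page from 4096 to 4096 - 2048 = 2048. The fill code then attaches each aggregated frame to the skb as page fragments of at most gro_size bytes, taking an extra page reference for every fragment after the first that shares a page.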
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 99389c8dda21..f1f3ca65667a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1,6 +1,6 @@
/* bnx2x_cmn.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -32,46 +32,6 @@
/**
- * bnx2x_bz_fp - zero content of the fastpath structure.
- *
- * @bp: driver handle
- * @index: fastpath index to be zeroed
- *
- * Makes sure the contents of the bp->fp[index].napi is kept
- * intact.
- */
-static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
-{
- struct bnx2x_fastpath *fp = &bp->fp[index];
- struct napi_struct orig_napi = fp->napi;
- /* bzero bnx2x_fastpath contents */
- memset(fp, 0, sizeof(*fp));
-
- /* Restore the NAPI object as it has been already initialized */
- fp->napi = orig_napi;
-
- fp->bp = bp;
- fp->index = index;
- if (IS_ETH_FP(fp))
- fp->max_cos = bp->max_cos;
- else
- /* Special queues support only one CoS */
- fp->max_cos = 1;
-
- /*
- * set the tpa flag for each queue. The tpa flag determines the queue
- * minimal size so it must be set prior to queue memory allocation
- */
- fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
-
-#ifdef BCM_CNIC
- /* We don't want TPA on an FCoE L2 ring */
- if (IS_FCOE_FP(fp))
- fp->disable_tpa = 1;
-#endif
-}
-
-/**
* bnx2x_move_fp - move content of the fastpath structure.
*
* @bp: driver handle
@@ -115,11 +75,10 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
/* prefetch skb end pointer to speedup dev_kfree_skb() */
prefetch(&skb->end);
- DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
+ DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
txdata->txq_index, idx, tx_buf, skb);
/* unmap first bd */
- DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
@@ -150,7 +109,6 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
/* now free frags */
while (nbd > 0) {
- DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
@@ -160,10 +118,11 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
/* release skb */
WARN_ON(!skb);
- if (skb) {
+ if (likely(skb)) {
(*pkts_compl)++;
(*bytes_compl) += skb->len;
}
+
dev_kfree_skb_any(skb);
tx_buf->first_bd = 0;
tx_buf->skb = NULL;
@@ -191,8 +150,8 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
pkt_cons = TX_BD(sw_cons);
- DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
- " pkt_cons %u\n",
+ DP(NETIF_MSG_TX_DONE,
+ "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
txdata->txq_index, hw_cons, sw_cons, pkt_cons);
bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
@@ -249,13 +208,11 @@ static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
fp->last_max_sge = idx;
}
-static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
- struct eth_fast_path_rx_cqe *fp_cqe)
+static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
+ u16 sge_len,
+ struct eth_end_agg_rx_cqe *cqe)
{
struct bnx2x *bp = fp->bp;
- u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
- le16_to_cpu(fp_cqe->len_on_bd)) >>
- SGE_PAGE_SHIFT;
u16 last_max, last_elem, first_elem;
u16 delta = 0;
u16 i;
@@ -266,15 +223,15 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
/* First mark all used pages */
for (i = 0; i < sge_len; i++)
BIT_VEC64_CLEAR_BIT(fp->sge_mask,
- RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
+ RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
- sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+ sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
/* Here we assume that the last SGE index is the biggest */
prefetch((void *)(fp->sge_mask));
bnx2x_update_last_max_sge(fp,
- le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+ le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
last_max = RX_SGE(fp->last_max_sge);
last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
@@ -368,6 +325,22 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
+ if (fp->mode == TPA_MODE_GRO) {
+ u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
+ tpa_info->full_page =
+ SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
+ /*
+ * FW 7.2.16 BUG workaround:
+ * if SGE size is (exactly) multiple gro_size
+ * fw will place one less frag on SGE.
+ * the calculation is done only for potentially
+ * dangerous MTUs.
+ */
+ if (unlikely(bp->gro_check))
+ if (!(SGE_PAGE_SIZE * PAGES_PER_SGE % gro_size))
+ tpa_info->full_page -= gro_size;
+ tpa_info->gro_size = gro_size;
+ }
#ifdef BNX2X_STOP_ON_ERROR
fp->tpa_queue_used |= (1 << queue);
@@ -424,25 +397,40 @@ static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 queue, struct sk_buff *skb,
+ struct bnx2x_agg_info *tpa_info,
+ u16 pages,
+ struct sk_buff *skb,
struct eth_end_agg_rx_cqe *cqe,
u16 cqe_idx)
{
struct sw_rx_page *rx_pg, old_rx_pg;
- u32 i, frag_len, frag_size, pages;
- int err;
- int j;
- struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+ u32 i, frag_len, frag_size;
+ int err, j, frag_id = 0;
u16 len_on_bd = tpa_info->len_on_bd;
+ u16 full_page = 0, gro_size = 0;
frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
- pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
+
+ if (fp->mode == TPA_MODE_GRO) {
+ gro_size = tpa_info->gro_size;
+ full_page = tpa_info->full_page;
+ }
/* This is needed in order to enable forwarding support */
- if (frag_size)
+ if (frag_size) {
skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
tpa_info->parsing_flags, len_on_bd);
+ /* set for GRO */
+ if (fp->mode == TPA_MODE_GRO)
+ skb_shinfo(skb)->gso_type =
+ (GET_FLAG(tpa_info->parsing_flags,
+ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
+ PRS_FLAG_OVERETH_IPV6) ?
+ SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
+ }
+
+
#ifdef BNX2X_STOP_ON_ERROR
if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
@@ -459,7 +447,12 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* FW gives the indices of the SGE as if the ring is an array
(meaning that "next" element will consume 2 indices) */
- frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
+ if (fp->mode == TPA_MODE_GRO)
+ frag_len = min_t(u32, frag_size, (u32)full_page);
+ else /* LRO */
+ frag_len = min_t(u32, frag_size,
+ (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
+
rx_pg = &fp->rx_page_ring[sge_idx];
old_rx_pg = *rx_pg;
@@ -475,9 +468,21 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(&old_rx_pg, mapping),
SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
-
/* Add one frag and update the appropriate fields in the skb */
- skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+ if (fp->mode == TPA_MODE_LRO)
+ skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+ else { /* GRO */
+ int rem;
+ int offset = 0;
+ for (rem = frag_len; rem > 0; rem -= gro_size) {
+ int len = rem > gro_size ? gro_size : rem;
+ skb_fill_page_desc(skb, frag_id++,
+ old_rx_pg.page, offset, len);
+ if (offset)
+ get_page(old_rx_pg.page);
+ offset += len;
+ }
+ }
skb->data_len += frag_len;
skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
@@ -489,18 +494,17 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return 0;
}
-static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 queue, struct eth_end_agg_rx_cqe *cqe,
- u16 cqe_idx)
+static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ struct bnx2x_agg_info *tpa_info,
+ u16 pages,
+ struct eth_end_agg_rx_cqe *cqe,
+ u16 cqe_idx)
{
- struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
- u32 pad = tpa_info->placement_offset;
+ u8 pad = tpa_info->placement_offset;
u16 len = tpa_info->len_on_bd;
struct sk_buff *skb = NULL;
- u8 *data = rx_buf->data;
- /* alloc new skb */
- u8 *new_data;
+ u8 *new_data, *data = rx_buf->data;
u8 old_tpa_state = tpa_info->tpa_state;
tpa_info->tpa_state = BNX2X_TPA_STOP;
@@ -525,8 +529,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
if (pad + len > fp->rx_buf_size) {
- BNX2X_ERR("skb_put is about to fail... "
- "pad %d len %d rx_buf_size %d\n",
+ BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
pad, len, fp->rx_buf_size);
bnx2x_panic();
return;
@@ -540,13 +543,14 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
+ if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
+ skb, cqe, cqe_idx)) {
if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
napi_gro_receive(&fp->napi, skb);
} else {
- DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
- " - dropping packet!\n");
+ DP(NETIF_MSG_RX_STATUS,
+ "Failed to allocate new pages - dropping packet!\n");
dev_kfree_skb_any(skb);
}
@@ -605,7 +609,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
struct eth_fast_path_rx_cqe *cqe_fp;
u8 cqe_fp_flags;
enum eth_rx_cqe_type cqe_fp_type;
- u16 len, pad;
+ u16 len, pad, queue;
u8 *data;
#ifdef BNX2X_STOP_ON_ERROR
@@ -622,28 +626,32 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
cqe_fp_flags = cqe_fp->type_error_flags;
cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
- DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
- " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
+ DP(NETIF_MSG_RX_STATUS,
+ "CQE type %x err %x status %x queue %x vlan %x len %u\n",
+ CQE_TYPE(cqe_fp_flags),
cqe_fp_flags, cqe_fp->status_flags,
le32_to_cpu(cqe_fp->rss_hash_result),
- le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
+ le16_to_cpu(cqe_fp->vlan_tag),
+ le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
/* is this a slowpath msg? */
if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
bnx2x_sp_event(fp, cqe);
goto next_cqe;
}
+
rx_buf = &fp->rx_buf_ring[bd_cons];
data = rx_buf->data;
if (!CQE_TYPE_FAST(cqe_fp_type)) {
+ struct bnx2x_agg_info *tpa_info;
+ u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
/* sanity check */
if (fp->disable_tpa &&
(CQE_TYPE_START(cqe_fp_type) ||
CQE_TYPE_STOP(cqe_fp_type)))
- BNX2X_ERR("START/STOP packet while "
- "disable_tpa type %x\n",
+ BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
CQE_TYPE(cqe_fp_type));
#endif
@@ -656,28 +664,38 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
bnx2x_tpa_start(fp, queue,
bd_cons, bd_prod,
cqe_fp);
+
goto next_rx;
- } else {
- u16 queue =
- cqe->end_agg_cqe.queue_index;
- DP(NETIF_MSG_RX_STATUS,
- "calling tpa_stop on queue %d\n",
- queue);
- bnx2x_tpa_stop(bp, fp, queue,
- &cqe->end_agg_cqe,
- comp_ring_cons);
+ }
+ queue = cqe->end_agg_cqe.queue_index;
+ tpa_info = &fp->tpa_info[queue];
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_stop on queue %d\n",
+ queue);
+
+ frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
+ tpa_info->len_on_bd;
+
+ if (fp->mode == TPA_MODE_GRO)
+ pages = (frag_size + tpa_info->full_page - 1) /
+ tpa_info->full_page;
+ else
+ pages = SGE_PAGE_ALIGN(frag_size) >>
+ SGE_PAGE_SHIFT;
+
+ bnx2x_tpa_stop(bp, fp, tpa_info, pages,
+ &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
- if (bp->panic)
- return 0;
+ if (bp->panic)
+ return 0;
#endif
- bnx2x_update_sge_prod(fp, cqe_fp);
- goto next_cqe;
- }
+ bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
+ goto next_cqe;
}
/* non TPA */
- len = le16_to_cpu(cqe_fp->pkt_len);
+ len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
pad = cqe_fp->placement_offset;
dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
@@ -687,7 +705,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
prefetch(data + pad); /* speedup eth_type_trans() */
/* is this an error packet? */
if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
- DP(NETIF_MSG_RX_ERR,
+ DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR flags %x rx packet %u\n",
cqe_fp_flags, sw_comp_cons);
fp->eth_q_stats.rx_err_discard_pkt++;
@@ -701,7 +719,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
(len <= RX_COPY_THRESH)) {
skb = netdev_alloc_skb_ip_align(bp->dev, len);
if (skb == NULL) {
- DP(NETIF_MSG_RX_ERR,
+ DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR packet dropped because of alloc failure\n");
fp->eth_q_stats.rx_skb_alloc_failed++;
goto reuse_rx;
@@ -722,9 +740,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
}
skb_reserve(skb, pad);
} else {
- DP(NETIF_MSG_RX_ERR,
- "ERROR packet dropped because "
- "of alloc failure\n");
+ DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
+ "ERROR packet dropped because of alloc failure\n");
fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
@@ -793,8 +810,8 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
struct bnx2x *bp = fp->bp;
u8 cos;
- DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
- "[fp %d fw_sd %d igusb %d]\n",
+ DP(NETIF_MSG_INTR,
+ "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
fp->index, fp->fw_sb_id, fp->igu_sb_id);
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
@@ -1007,10 +1024,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
GFP_ATOMIC);
if (!first_buf->data) {
- BNX2X_ERR("Failed to allocate TPA "
- "skb pool for queue[%d] - "
- "disabling TPA on this "
- "queue!\n", j);
+ BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
+ j);
bnx2x_free_tpa_pool(bp, fp, i);
fp->disable_tpa = 1;
break;
@@ -1030,10 +1045,10 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
- BNX2X_ERR("was only able to allocate "
- "%d rx sges\n", i);
- BNX2X_ERR("disabling TPA for "
- "queue[%d]\n", j);
+ BNX2X_ERR("was only able to allocate %d rx sges\n",
+ i);
+ BNX2X_ERR("disabling TPA for queue[%d]\n",
+ j);
/* Cleanup already allocated elements */
bnx2x_free_rx_sge_range(bp, fp,
ring_prod);
@@ -1188,8 +1203,8 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
for_each_eth_queue(bp, i) {
if (nvecs == offset)
return;
- DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
- "irq\n", i, bp->msix_table[offset].vector);
+ DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
+ i, bp->msix_table[offset].vector);
free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
}
@@ -1211,21 +1226,21 @@ int bnx2x_enable_msix(struct bnx2x *bp)
int msix_vec = 0, i, rc, req_cnt;
bp->msix_table[msix_vec].entry = msix_vec;
- DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
+ BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
bp->msix_table[0].entry);
msix_vec++;
#ifdef BCM_CNIC
bp->msix_table[msix_vec].entry = msix_vec;
- DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
+ BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
msix_vec++;
#endif
/* We need separate vectors for ETH queues only (not FCoE) */
for_each_eth_queue(bp, i) {
bp->msix_table[msix_vec].entry = msix_vec;
- DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
- "(fastpath #%u)\n", msix_vec, msix_vec, i);
+ BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
+ msix_vec, msix_vec, i);
msix_vec++;
}
@@ -1241,14 +1256,12 @@ int bnx2x_enable_msix(struct bnx2x *bp)
/* how many fewer vectors will we have? */
int diff = req_cnt - rc;
- DP(NETIF_MSG_IFUP,
- "Trying to use less MSI-X vectors: %d\n", rc);
+ BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
if (rc) {
- DP(NETIF_MSG_IFUP,
- "MSI-X is not attainable rc %d\n", rc);
+ BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
return rc;
}
/*
@@ -1256,13 +1269,13 @@ int bnx2x_enable_msix(struct bnx2x *bp)
*/
bp->num_queues -= diff;
- DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
+ BNX2X_DEV_INFO("New queue configuration set: %d\n",
bp->num_queues);
} else if (rc) {
/* fall to INTx if not enough memory */
if (rc == -ENOMEM)
bp->flags |= DISABLE_MSI_FLAG;
- DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
+ BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
return rc;
}
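
The fallback above leans on the pre-3.14 pci_enable_msix() contract: a positive return value means the request failed but that many vectors are still available, so the driver shrinks bp->num_queues and asks again. A hedged userspace model of that shrink-and-retry pattern; fake_enable_msix() is a stand-in for the real PCI call, not its API:

    /* Model of the "shrink and retry" MSI-X request pattern; the stub only
     * mimics the old pci_enable_msix() return convention. */
    #include <stdio.h>

    #define VECTORS_AVAILABLE 5                  /* pretend the platform has 5 */

    static int fake_enable_msix(int requested)
    {
            if (requested <= VECTORS_AVAILABLE)
                    return 0;                    /* granted as requested */
            return VECTORS_AVAILABLE;            /* retry with this many */
    }

    int main(void)
    {
            int req = 9;                         /* sp + CNIC + 7 queues */
            int rc = fake_enable_msix(req);

            if (rc > 0) {
                    printf("only %d vectors available, retrying\n", rc);
                    rc = fake_enable_msix(rc);   /* second, smaller request */
            }
            printf("final rc %d\n", rc);
            return 0;
    }
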
@@ -1305,8 +1318,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
i = BNX2X_NUM_ETH_QUEUES(bp);
offset = 1 + CNIC_PRESENT;
- netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
- " ... fp[%d] %d\n",
+ netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
bp->msix_table[0].vector,
0, bp->msix_table[offset].vector,
i - 1, bp->msix_table[offset + i - 1].vector);
@@ -1320,7 +1332,7 @@ int bnx2x_enable_msi(struct bnx2x *bp)
rc = pci_enable_msi(bp->pdev);
if (rc) {
- DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
+ BNX2X_DEV_INFO("MSI is not attainable\n");
return -1;
}
bp->flags |= USING_MSI_FLAG;
@@ -1441,8 +1453,8 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
}
#ifdef BCM_CNIC
- /* override in ISCSI SD mod */
- if (IS_MF_ISCSI_SD(bp))
+ /* override in STORAGE SD mode */
+ if (IS_MF_STORAGE_SD(bp))
bp->num_queues = 1;
#endif
/* Add special queues */
@@ -1497,7 +1509,7 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
return rc;
}
- DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
+ DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
tx, rx);
return rc;
@@ -1562,7 +1574,7 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
{
- struct bnx2x_config_rss_params params = {0};
+ struct bnx2x_config_rss_params params = {NULL};
int i;
/* Although RSS is meaningless when there is a single HW queue we
@@ -1625,7 +1637,7 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
/* Prepare parameters for function state transitions */
__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
@@ -1646,7 +1658,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
{
int rc;
unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
- struct bnx2x_mcast_ramrod_params rparam = {0};
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
/***************** Cleanup MACs' object first *************************/
@@ -1678,8 +1690,8 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
/* Add a DEL command... */
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
if (rc < 0)
- BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
- "object: %d\n", rc);
+ BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
+ rc);
/* ...and wait until all pending commands are cleared */
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
@@ -1717,8 +1729,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
int i, rc;
#ifdef BNX2X_STOP_ON_ERROR
- if (unlikely(bp->panic))
+ if (unlikely(bp->panic)) {
+ BNX2X_ERR("Can't load NIC when there is panic\n");
return -EPERM;
+ }
#endif
bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
@@ -1738,6 +1752,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
* allocated only once, fp index, max_cos, bp pointer.
* Also set fp->disable_tpa.
*/
+ DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
for_each_queue(bp, i)
bnx2x_bz_fp(bp, i);
@@ -1766,12 +1781,27 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_napi_enable(bp);
+ /* set pf load just before approaching the MCP */
+ bnx2x_set_pf_load(bp);
+
/* Send LOAD_REQUEST command to MCP
* Returns the type of LOAD command:
* if it is the first port to be initialized
* common blocks should be initialized, otherwise - not
*/
if (!BP_NOMCP(bp)) {
+ /* init fw_seq */
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+
+ /* Get current FW pulse sequence */
+ bp->fw_drv_pulse_wr_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK);
+ BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+
load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
if (!load_code) {
BNX2X_ERR("MCP response failure, aborting\n");
@@ -1779,9 +1809,33 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
LOAD_ERROR_EXIT(bp, load_error1);
}
if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+ BNX2X_ERR("Driver load refused\n");
rc = -EBUSY; /* other port in diagnostic mode */
LOAD_ERROR_EXIT(bp, load_error1);
}
+ if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
+ load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
+ /* build FW version dword */
+ u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
+ (BCM_5710_FW_MINOR_VERSION << 8) +
+ (BCM_5710_FW_REVISION_VERSION << 16) +
+ (BCM_5710_FW_ENGINEERING_VERSION << 24);
+
+ /* read loaded FW from chip */
+ u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+
+ DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
+ loaded_fw, my_fw);
+
+ /* abort nic load if version mismatch */
+ if (my_fw != loaded_fw) {
+ BNX2X_ERR("bnx2x with FW %x already loaded, "
+ "which mismatches my %x FW. aborting",
+ loaded_fw, my_fw);
+ rc = -EBUSY;
+ LOAD_ERROR_EXIT(bp, load_error2);
+ }
+ }
} else {
int path = BP_PATH(bp);
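
The new version check packs the four firmware components into a single dword (major in the low byte, engineering version in the high byte) and compares it with the word read back from XSEM_REG_PRAM. A small sketch of that packing, with made-up version numbers:

    /* Byte-packed firmware version dword, as in the check above; the
     * version numbers below are invented for illustration. */
    #include <stdio.h>

    static unsigned int pack_fw_version(unsigned int major, unsigned int minor,
                                        unsigned int rev, unsigned int eng)
    {
            return major | (minor << 8) | (rev << 16) | (eng << 24);
    }

    int main(void)
    {
            unsigned int my_fw     = pack_fw_version(7, 0, 29, 0);
            unsigned int loaded_fw = pack_fw_version(7, 0, 23, 0);

            printf("my fw 0x%08x, loaded fw 0x%08x -> %s\n", my_fw, loaded_fw,
                   my_fw == loaded_fw ? "match" : "mismatch, abort load");
            return 0;
    }

The driver adds the shifted fields rather than ORing them; since each component fits in its own byte, the two are equivalent here.
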
@@ -1816,7 +1870,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
} else
bp->port.pmf = 0;
- DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+ DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
/* Init Function state controlling object */
bnx2x__init_func_obj(bp);
@@ -1832,6 +1886,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Connect to IRQs */
rc = bnx2x_setup_irqs(bp);
if (rc) {
+ BNX2X_ERR("IRQs setup failed\n");
bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
LOAD_ERROR_EXIT(bp, load_error2);
}
@@ -1882,21 +1937,27 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
for_each_nondefault_queue(bp, i) {
rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
- if (rc)
+ if (rc) {
+ BNX2X_ERR("Queue setup failed\n");
LOAD_ERROR_EXIT(bp, load_error4);
+ }
}
rc = bnx2x_init_rss_pf(bp);
- if (rc)
+ if (rc) {
+ BNX2X_ERR("PF RSS init failed\n");
LOAD_ERROR_EXIT(bp, load_error4);
+ }
/* Now when Clients are configured we are ready to work */
bp->state = BNX2X_STATE_OPEN;
/* Configure a ucast MAC */
rc = bnx2x_set_eth_mac(bp, true);
- if (rc)
+ if (rc) {
+ BNX2X_ERR("Setting Ethernet MAC failed\n");
LOAD_ERROR_EXIT(bp, load_error4);
+ }
if (bp->pending_max) {
bnx2x_update_max_mf_config(bp, bp->pending_max);
@@ -1948,7 +2009,15 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
- bnx2x_inc_load_cnt(bp);
+
+ /* mark driver is loaded in shmem2 */
+ if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ u32 val;
+ val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
+ DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
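
Marking the function as loaded is a plain read-modify-write of its capabilities dword in shmem2: read the word, OR in the "loaded" bits, write it back; the unload path further down clears only the L2 bit. A toy model of that bookkeeping, with placeholder bit positions:

    /* Toy model of the shmem2 capability-flag bookkeeping; the bit values
     * and the shmem[] array are placeholders, not the real layout. */
    #include <stdio.h>

    #define CAP_LOADED_SUPPORTED (1u << 0)       /* hypothetical bits */
    #define CAP_LOADED_L2        (1u << 1)

    static unsigned int shmem[8];                /* one dword per function */

    static void mark_loaded(int func)
    {
            shmem[func] |= CAP_LOADED_SUPPORTED | CAP_LOADED_L2;
    }

    static void mark_unloaded(int func)
    {
            shmem[func] &= ~CAP_LOADED_L2;       /* only the L2 bit is cleared */
    }

    int main(void)
    {
            mark_loaded(2);
            printf("after load:   0x%08x\n", shmem[2]);
            mark_unloaded(2);
            printf("after unload: 0x%08x\n", shmem[2]);
            return 0;
    }
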
/* Wait for all pending SP commands to complete */
if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
@@ -1988,6 +2057,8 @@ load_error2:
bp->port.pmf = 0;
load_error1:
bnx2x_napi_disable(bp);
+ /* clear pf_load status, as it was already set */
+ bnx2x_clear_pf_load(bp);
load_error0:
bnx2x_free_mem(bp);
@@ -2001,6 +2072,14 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
int i;
bool global = false;
+ /* mark driver is unloaded in shmem2 */
+ if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ u32 val;
+ val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
+
if ((bp->state == BNX2X_STATE_CLOSED) ||
(bp->state == BNX2X_STATE_ERROR)) {
/* We can get here if the driver has been unloaded
@@ -2015,8 +2094,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
bnx2x_release_leader_lock(bp);
smp_mb();
- DP(NETIF_MSG_HW, "Releasing a leadership...\n");
-
+ DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
+ BNX2X_ERR("Can't unload in closed or error state\n");
return -EINVAL;
}
@@ -2045,6 +2124,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
bnx2x_drv_pulse(bp);
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_save_statistics(bp);
/* Cleanup the chip if needed */
if (unload_mode != UNLOAD_RECOVERY)
@@ -2108,7 +2188,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* The last driver must disable a "close the gate" if there is no
* parity attention or "process kill" pending.
*/
- if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
+ if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
bnx2x_disable_close_the_gate(bp);
return 0;
@@ -2120,7 +2200,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
/* If there is no power capability, silently succeed */
if (!bp->pm_cap) {
- DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
+ BNX2X_DEV_INFO("No power capability. Breaking.\n");
return 0;
}
@@ -2161,6 +2241,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
break;
default:
+ dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
return -EINVAL;
}
return 0;
@@ -2230,7 +2311,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
napi_complete(napi);
/* Re-enable interrupts */
- DP(NETIF_MSG_HW,
+ DP(NETIF_MSG_RX_STATUS,
"Update index to %d\n", fp->fp_hc_idx);
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
le16_to_cpu(fp->fp_hc_idx),
@@ -2264,9 +2345,8 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
h_tx_bd->nbd = cpu_to_le16(nbd);
h_tx_bd->nbytes = cpu_to_le16(hlen);
- DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
- "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
- h_tx_bd->addr_lo, h_tx_bd->nbd);
+ DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
+ h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
/* now get a new data BD
* (after the pbd) and fill it */
@@ -2406,8 +2486,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
exit_lbl:
if (unlikely(to_copy))
DP(NETIF_MSG_TX_QUEUED,
- "Linearization IS REQUIRED for %s packet. "
- "num_frags %d hlen %d first_bd_sz %d\n",
+ "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
(xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
@@ -2615,7 +2694,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
/* enable this debug print to view the transmission queue being used
- DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
+ DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
txq_index, fp_index, txdata_index); */
/* locate the fastpath and the txdata */
@@ -2623,8 +2702,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
txdata = &fp->txdata[txdata_index];
/* enable this debug print to view the transmission details
- DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
- " tx_data ptr %p fp pointer %p\n",
+ DP(NETIF_MSG_TX_QUEUED,
+ "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
txdata->cid, fp_index, txdata_index, txdata, fp); */
if (unlikely(bnx2x_tx_avail(bp, txdata) <
@@ -2635,8 +2714,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
- "protocol(%x,%x) gso type %x xmit_type %x\n",
+ DP(NETIF_MSG_TX_QUEUED,
+ "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
@@ -2658,8 +2737,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Statistics of linearization */
bp->lin_cnt++;
if (skb_linearize(skb) != 0) {
- DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
- "silently dropping this SKB\n");
+ DP(NETIF_MSG_TX_QUEUED,
+ "SKB linearization failed - silently dropping this SKB\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2669,8 +2748,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = dma_map_single(&bp->pdev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
- DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
- "silently dropping this SKB\n");
+ DP(NETIF_MSG_TX_QUEUED,
+ "SKB mapping failed - silently dropping this SKB\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2765,8 +2844,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
pkt_size = tx_start_bd->nbytes;
- DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
- " nbytes %d flags %x vlan %x\n",
+ DP(NETIF_MSG_TX_QUEUED,
+ "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
tx_start_bd->bd_flags.as_bitfield,
@@ -2809,8 +2888,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
unsigned int pkts_compl = 0, bytes_compl = 0;
- DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
- "dropping packet...\n");
+ DP(NETIF_MSG_TX_QUEUED,
+ "Unable to map page - dropping packet...\n");
/* we need unmap all buffers already mapped
* for this SKB;
@@ -2866,8 +2945,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pbd_e1x)
DP(NETIF_MSG_TX_QUEUED,
- "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
- " tcp_flags %x xsum %x seq %u hlen %u\n",
+ "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
@@ -2943,23 +3021,22 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
/* requested to support too many traffic classes */
if (num_tc > bp->max_cos) {
- DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
- " requested: %d. max supported is %d\n",
- num_tc, bp->max_cos);
+ BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
+ num_tc, bp->max_cos);
return -EINVAL;
}
/* declare amount of supported traffic classes */
if (netdev_set_num_tc(dev, num_tc)) {
- DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
- num_tc);
+ BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
return -EINVAL;
}
/* configure priority to traffic class mapping */
for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
- DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "mapping priority %d to tc %d\n",
prio, bp->prio_to_cos[prio]);
}
@@ -2979,7 +3056,8 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
count = BNX2X_NUM_ETH_QUEUES(bp);
offset = cos * MAX_TXQS_PER_COS;
netdev_set_tc_queue(dev, cos, count, offset);
- DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "mapping tc %d to offset %d count %d\n",
cos, offset, count);
}
@@ -2993,12 +3071,16 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
- if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
+ if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
+ BNX2X_ERR("Requested MAC address is not valid\n");
return -EINVAL;
+ }
#ifdef BCM_CNIC
- if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
+ if (IS_MF_STORAGE_SD(bp) && !is_zero_ether_addr(addr->sa_data)) {
+ BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
return -EINVAL;
+ }
#endif
if (netif_running(dev)) {
@@ -3007,6 +3089,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
if (netif_running(dev))
@@ -3071,7 +3154,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
for_each_cos_in_tx_queue(fp, cos) {
struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
- DP(BNX2X_MSG_SP,
+ DP(NETIF_MSG_IFDOWN,
"freeing tx memory of fp %d cos %d cid %d\n",
fp_index, cos, txdata->cid);
@@ -3116,15 +3199,22 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
int rx_ring_size = 0;
#ifdef BCM_CNIC
- if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) {
+ if (!bp->rx_ring_size && IS_MF_STORAGE_SD(bp)) {
rx_ring_size = MIN_RX_SIZE_NONTPA;
bp->rx_ring_size = rx_ring_size;
} else
#endif
if (!bp->rx_ring_size) {
+ u32 cfg = SHMEM_RD(bp,
+ dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+ /* Decrease ring size for 1G functions */
+ if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
+ PORT_HW_CFG_NET_SERDES_IF_SGMII)
+ rx_ring_size /= 10;
+
/* allocate at least number of buffers required by FW */
rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
MIN_RX_SIZE_TPA, rx_ring_size);
@@ -3163,8 +3253,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
for_each_cos_in_tx_queue(fp, cos) {
struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
- DP(BNX2X_MSG_SP, "allocating tx memory of "
- "fp %d cos %d\n",
+ DP(NETIF_MSG_IFUP,
+ "allocating tx memory of fp %d cos %d\n",
index, cos);
BNX2X_ALLOC(txdata->tx_buf_ring,
@@ -3401,6 +3491,7 @@ int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
cp->fcoe_wwn_port_name_lo);
break;
default:
+ BNX2X_ERR("Wrong WWN type requested - %d\n", type);
return -EINVAL;
}
@@ -3414,13 +3505,15 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
struct bnx2x *bp = netdev_priv(dev);
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- pr_err("Handling parity error recovery. Try again later\n");
+ BNX2X_ERR("Can't perform change MTU during parity recovery\n");
return -EAGAIN;
}
if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
- ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
+ ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
+ BNX2X_ERR("Can't support requested MTU size\n");
return -EINVAL;
+ }
/* This does not race with packet allocation
* because the actual alloc size is
@@ -3428,17 +3521,21 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
*/
dev->mtu = new_mtu;
+ bp->gro_check = bnx2x_need_gro_check(new_mtu);
+
return bnx2x_reload_if_running(dev);
}
netdev_features_t bnx2x_fix_features(struct net_device *dev,
- netdev_features_t features)
+ netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);
/* TPA requires Rx CSUM offloading */
- if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
+ if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
features &= ~NETIF_F_LRO;
+ features &= ~NETIF_F_GRO;
+ }
return features;
}
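
ndo_fix_features callbacks drop feature bits whose prerequisites are missing before the core commits them; here both LRO and the newly added GRO bit are cleared whenever RX checksumming is off or TPA is disabled. A reduced model of that dependency masking, with illustrative flag values:

    /* Reduced model of the feature-dependency masking above; the bit
     * values are illustrative, not the kernel's NETIF_F_* constants. */
    #include <stdio.h>

    #define F_RXCSUM (1u << 0)
    #define F_LRO    (1u << 1)
    #define F_GRO    (1u << 2)

    static unsigned int fix_features(unsigned int features, int disable_tpa)
    {
            /* TPA, and therefore LRO/GRO offload, requires RX checksumming */
            if (!(features & F_RXCSUM) || disable_tpa)
                    features &= ~(F_LRO | F_GRO);
            return features;
    }

    int main(void)
    {
            unsigned int req = F_LRO | F_GRO;    /* requested without RXCSUM */

            printf("granted 0x%x\n", fix_features(req, 0));  /* both dropped */
            return 0;
    }
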
@@ -3454,6 +3551,11 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
else
flags &= ~TPA_ENABLE_FLAG;
+ if (features & NETIF_F_GRO)
+ flags |= GRO_ENABLE_FLAG;
+ else
+ flags &= ~GRO_ENABLE_FLAG;
+
if (features & NETIF_F_LOOPBACK) {
if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
bp->link_params.loopback_mode = LOOPBACK_BMAC;
@@ -3541,7 +3643,7 @@ int bnx2x_resume(struct pci_dev *pdev)
bp = netdev_priv(dev);
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- pr_err("Handling parity error recovery. Try again later\n");
+ BNX2X_ERR("Handling parity error recovery. Try again later\n");
return -EAGAIN;
}
@@ -3557,8 +3659,6 @@ int bnx2x_resume(struct pci_dev *pdev)
bnx2x_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
- /* Since the chip was reset, clear the FW sequence number */
- bp->fw_seq = 0;
rc = bnx2x_nic_load(bp, LOAD_OPEN);
rtnl_unlock();
@@ -3588,8 +3688,9 @@ static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
u32 addr = BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
REG_WR8(bp, addr, ticks);
- DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
- port, fw_sb_id, sb_index, ticks);
+ DP(NETIF_MSG_IFUP,
+ "port %x fw_sb_id %d sb_index %d ticks %d\n",
+ port, fw_sb_id, sb_index, ticks);
}
static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
@@ -3604,8 +3705,9 @@ static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
flags &= ~HC_INDEX_DATA_HC_ENABLED;
flags |= enable_flag;
REG_WR16(bp, addr, flags);
- DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
- port, fw_sb_id, sb_index, disable);
+ DP(NETIF_MSG_IFUP,
+ "port %x fw_sb_id %d sb_index %d disable %d\n",
+ port, fw_sb_id, sb_index, disable);
}
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 4f40f7d7d8c6..8b163388659a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1,6 +1,6 @@
/* bnx2x_cmn.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -379,8 +379,8 @@ void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
unsigned long ramrod_flags);
/* Parity errors related */
-void bnx2x_inc_load_cnt(struct bnx2x *bp);
-u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
+void bnx2x_set_pf_load(struct bnx2x *bp);
+bool bnx2x_clear_pf_load(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
@@ -534,8 +534,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
*/
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif
+
netdev_features_t bnx2x_fix_features(struct net_device *dev,
- netdev_features_t features);
+ netdev_features_t features);
int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
/**
@@ -597,7 +598,7 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
(update << IGU_REGULAR_BUPDATE_SHIFT) |
(op << IGU_REGULAR_ENABLE_INT_SHIFT));
- DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
+ DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
cmd_data.sb_id_and_flags, igu_addr);
REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
@@ -614,8 +615,7 @@ static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
u32 sb_bit = 1 << (idu_sb_id%32);
- u32 func_encode = func |
- ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
+ u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
/* Not supported in BC mode */
@@ -648,8 +648,8 @@ static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
- DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
- "idu_sb_id %d offset %d bit %d (cnt %d)\n",
+ DP(NETIF_MSG_HW,
+ "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
}
}
@@ -668,8 +668,6 @@ static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
(update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
(op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
- DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
- (*(u32 *)&igu_ack), hc_addr);
REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
/* Make sure that ACK is written */
@@ -703,9 +701,6 @@ static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
COMMAND_REG_SIMD_MASK);
u32 result = REG_RD(bp, hc_addr);
- DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
- result, hc_addr);
-
barrier();
return result;
}
@@ -715,7 +710,7 @@ static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
u32 result = REG_RD(bp, igu_addr);
- DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
+ DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
result, igu_addr);
barrier();
@@ -893,13 +888,16 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
dma_addr_t mapping;
- if (unlikely(page == NULL))
+ if (unlikely(page == NULL)) {
+ BNX2X_ERR("Can't alloc sge\n");
return -ENOMEM;
+ }
mapping = dma_map_page(&bp->pdev->dev, page, 0,
SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
__free_pages(page, PAGES_PER_SGE_SHIFT);
+ BNX2X_ERR("Can't map sge\n");
return -ENOMEM;
}
@@ -929,6 +927,7 @@ static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
kfree(data);
+ BNX2X_ERR("Can't map rx data\n");
return -ENOMEM;
}
@@ -971,7 +970,7 @@ static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
*/
static inline int bnx2x_func_start(struct bnx2x *bp)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_start_params *start_params =
&func_params.params.start;
@@ -984,10 +983,11 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
/* Function parameters */
start_params->mf_mode = bp->mf_mode;
start_params->sd_vlan_tag = bp->mf_ov;
- if (CHIP_IS_E1x(bp))
- start_params->network_cos_mode = OVERRIDE_COS;
- else
+
+ if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
start_params->network_cos_mode = STATIC_COS;
+ else /* CHIP_IS_E1X */
+ start_params->network_cos_mode = FW_WRR;
return bnx2x_func_state_change(bp, &func_params);
}
@@ -1142,7 +1142,7 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
{
struct bnx2x *bp = fp->bp;
u16 ring_prod, cqe_ring_prod;
- int i;
+ int i, failure_cnt = 0;
fp->rx_comp_cons = 0;
cqe_ring_prod = ring_prod = 0;
@@ -1152,18 +1152,17 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
*/
for (i = 0; i < rx_ring_size; i++) {
if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ failure_cnt++;
continue;
}
ring_prod = NEXT_RX_IDX(ring_prod);
cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
- WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
+ WARN_ON(ring_prod <= (i - failure_cnt));
}
- if (fp->eth_q_stats.rx_skb_alloc_failed)
- BNX2X_ERR("was only able to allocate "
- "%d rx skbs on queue[%d]\n",
- (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);
+ if (failure_cnt)
+ BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
+ i - failure_cnt, fp->index);
fp->rx_bd_prod = ring_prod;
/* Limit the CQE producer by the CQE ring size */
@@ -1171,7 +1170,9 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
cqe_ring_prod);
fp->rx_pkt = fp->rx_calls = 0;
- return i - fp->eth_q_stats.rx_skb_alloc_failed;
+ fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
+
+ return i - failure_cnt;
}
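
The ring-fill loop now counts allocation failures in a local variable and folds the total into eth_q_stats once at the end, rather than bumping (and re-reading) the statistics counter on every miss. A stripped-down version of that pattern:

    /* "Count locally, update the statistic once" pattern from
     * bnx2x_alloc_rx_bds() above, with a fake allocator. */
    #include <stdio.h>

    static unsigned long rx_skb_alloc_failed;    /* the queue statistic */

    static int try_alloc(int i)
    {
            return i % 7 != 0;                   /* every 7th allocation fails */
    }

    static int fill_ring(int ring_size)
    {
            int i, failure_cnt = 0;

            for (i = 0; i < ring_size; i++)
                    if (!try_alloc(i))
                            failure_cnt++;

            if (failure_cnt)
                    printf("was only able to allocate %d rx buffers\n",
                           ring_size - failure_cnt);

            rx_skb_alloc_failed += failure_cnt;  /* single stats update */
            return ring_size - failure_cnt;
    }

    int main(void)
    {
            printf("filled %d, stat %lu\n", fill_ring(64), rx_skb_alloc_failed);
            return 0;
    }
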
/* Statistics ID are global per chip/path, while Client IDs for E1x are per
@@ -1297,7 +1298,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp,
txdata->txq_index = txq_index;
txdata->tx_cons_sb = tx_cons_sb;
- DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d\n",
+ DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
txdata->cid, txdata->txq_index);
}
@@ -1342,7 +1343,7 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);
- DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)\n", fp->index);
+ DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
/* qZone id equals to FW (per path) client id */
bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
@@ -1361,8 +1362,8 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
- DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d "
- "igu_sb %d\n",
+ DP(NETIF_MSG_IFUP,
+ "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
fp->igu_sb_id);
}
@@ -1375,8 +1376,7 @@ static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
while (bnx2x_has_tx_work_unload(txdata)) {
if (!cnt) {
- BNX2X_ERR("timeout waiting for queue[%d]: "
- "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
+ BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
txdata->txq_index, txdata->tx_pkt_prod,
txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
@@ -1453,8 +1453,8 @@ static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
netif_addr_lock_bh(bp->dev);
if (bp->sp_state & mask) {
- BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, "
- "mask 0x%lx\n", bp->sp_state, mask);
+ BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
+ bp->sp_state, mask);
netif_addr_unlock_bh(bp->dev);
return false;
}
@@ -1490,13 +1490,113 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
FUNC_MF_CFG_MAX_BW_SHIFT;
if (!max_cfg) {
- DP(NETIF_MSG_LINK,
+ DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL,
"Max BW configured to 0 - using 100 instead\n");
max_cfg = 100;
}
return max_cfg;
}
+/* checks if HW supports GRO for given MTU */
+static inline bool bnx2x_mtu_allows_gro(int mtu)
+{
+ /* gro frags per page */
+ int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);
+
+ /*
+ * 1. number of frags should not grow above MAX_SKB_FRAGS
+ * 2. frag must fit the page
+ */
+ return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
+}
+
+static inline bool bnx2x_need_gro_check(int mtu)
+{
+ return (SGE_PAGES / (mtu - ETH_MAX_TPA_HEADER_SIZE - 1)) !=
+ (SGE_PAGES / (mtu - ETH_MIN_TPA_HEADER_SIZE + 1));
+}
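
bnx2x_mtu_allows_gro() reduces to two constraints: an MTU-sized frag must fit in one SGE page, and the number of frags an aggregation can produce must not exceed MAX_SKB_FRAGS. A standalone sketch of the arithmetic; the constants are placeholders and do not match the driver's exact values:

    /* Standalone sketch of the GRO-vs-MTU feasibility test; constants are
     * placeholders, not the driver's definitions. */
    #include <stdbool.h>
    #include <stdio.h>

    #define SGE_PAGE_SIZE     4096
    #define MAX_SKB_FRAGS     17
    #define SGL_SIZE          4      /* SGEs per aggregation, hypothetical */
    #define MAX_TPA_HDR_SIZE  190    /* header overhead, hypothetical */

    static bool mtu_allows_gro(int mtu)
    {
            int frags_per_page = SGE_PAGE_SIZE / (mtu - MAX_TPA_HDR_SIZE);

            /* 1. a frag must fit a page  2. frag count must fit the skb */
            return mtu <= SGE_PAGE_SIZE &&
                   SGL_SIZE * frags_per_page <= MAX_SKB_FRAGS;
    }

    int main(void)
    {
            printf("mtu 1500: %s\n", mtu_allows_gro(1500) ? "GRO ok" : "no GRO");
            printf("mtu 9000: %s\n", mtu_allows_gro(9000) ? "GRO ok" : "no GRO");
            return 0;
    }
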
+
+/**
+ * bnx2x_bz_fp - zero content of the fastpath structure.
+ *
+ * @bp: driver handle
+ * @index: fastpath index to be zeroed
+ *
+ * Makes sure the contents of the bp->fp[index].napi is kept
+ * intact.
+ */
+static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct napi_struct orig_napi = fp->napi;
+ /* bzero bnx2x_fastpath contents */
+ if (bp->stats_init)
+ memset(fp, 0, sizeof(*fp));
+ else {
+ /* Keep Queue statistics */
+ struct bnx2x_eth_q_stats *tmp_eth_q_stats;
+ struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
+
+ tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
+ GFP_KERNEL);
+ if (tmp_eth_q_stats)
+ memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
+ sizeof(struct bnx2x_eth_q_stats));
+
+ tmp_eth_q_stats_old =
+ kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
+ GFP_KERNEL);
+ if (tmp_eth_q_stats_old)
+ memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
+ sizeof(struct bnx2x_eth_q_stats_old));
+
+ memset(fp, 0, sizeof(*fp));
+
+ if (tmp_eth_q_stats) {
+ memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
+ sizeof(struct bnx2x_eth_q_stats));
+ kfree(tmp_eth_q_stats);
+ }
+
+ if (tmp_eth_q_stats_old) {
+ memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
+ sizeof(struct bnx2x_eth_q_stats_old));
+ kfree(tmp_eth_q_stats_old);
+ }
+
+ }
+
+ /* Restore the NAPI object as it has been already initialized */
+ fp->napi = orig_napi;
+
+ fp->bp = bp;
+ fp->index = index;
+ if (IS_ETH_FP(fp))
+ fp->max_cos = bp->max_cos;
+ else
+ /* Special queues support only one CoS */
+ fp->max_cos = 1;
+
+ /*
+ * set the tpa flag for each queue. The tpa flag determines the queue
+ * minimal size so it must be set prior to queue memory allocation
+ */
+ fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
+ (bp->flags & GRO_ENABLE_FLAG &&
+ bnx2x_mtu_allows_gro(bp->dev->mtu)));
+ if (bp->flags & TPA_ENABLE_FLAG)
+ fp->mode = TPA_MODE_LRO;
+ else if (bp->flags & GRO_ENABLE_FLAG)
+ fp->mode = TPA_MODE_GRO;
+
+#ifdef BCM_CNIC
+ /* We don't want TPA on an FCoE L2 ring */
+ if (IS_FCOE_FP(fp))
+ fp->disable_tpa = 1;
+#endif
+}
+
+#ifdef BCM_CNIC
/**
* bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
*
@@ -1504,7 +1604,7 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
*
*/
void bnx2x_get_iscsi_info(struct bnx2x *bp);
-
+#endif
/* returns func by VN for current port */
static inline int func_by_vn(struct bnx2x *bp, int vn)
{
@@ -1545,7 +1645,7 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
if (SHMEM2_HAS(bp, drv_flags)) {
u32 drv_flags;
- bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
drv_flags = SHMEM2_RD(bp, drv_flags);
if (set)
@@ -1554,8 +1654,8 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
RESET_FLAGS(drv_flags, flags);
SHMEM2_WR(bp, drv_flags, drv_flags);
- DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
- bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
}
}
@@ -1564,7 +1664,7 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
if (is_valid_ether_addr(addr))
return true;
#ifdef BCM_CNIC
- if (is_zero_ether_addr(addr) && IS_MF_ISCSI_SD(bp))
+ if (is_zero_ether_addr(addr) && IS_MF_STORAGE_SD(bp))
return true;
#endif
return false;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 6d82ade4c31c..4f9244bd7530 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1,6 +1,6 @@
/* bnx2x_dcb.c: Broadcom Everest network driver.
*
- * Copyright 2009-2011 Broadcom Corporation
+ * Copyright 2009-2012 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -121,26 +121,6 @@ static void bnx2x_pfc_clear(struct bnx2x *bp)
{
struct bnx2x_nig_brb_pfc_port_params nig_params = {0};
nig_params.pause_enable = 1;
-#ifdef BNX2X_SAFC
- if (bp->flags & SAFC_TX_FLAG) {
- u32 high = 0, low = 0;
- int i;
-
- for (i = 0; i < BNX2X_MAX_PRIORITY; i++) {
- if (bp->pri_map[i] == 1)
- high |= (1 << i);
- if (bp->pri_map[i] == 0)
- low |= (1 << i);
- }
-
- nig_params.llfc_low_priority_classes = high;
- nig_params.llfc_low_priority_classes = low;
-
- nig_params.pause_enable = 0;
- nig_params.llfc_enable = 1;
- nig_params.llfc_out_en = 1;
- }
-#endif /* BNX2X_SAFC */
bnx2x_acquire_phy_lock(bp);
bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED;
bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params);
@@ -167,27 +147,27 @@ static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp,
DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i));
/* pfc */
- DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n",
+ DP(BNX2X_MSG_DCB, "dcbx_features.pfc.pri_en_bitmap %x\n",
features->pfc.pri_en_bitmap);
- DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n",
+ DP(BNX2X_MSG_DCB, "dcbx_features.pfc.pfc_caps %x\n",
features->pfc.pfc_caps);
- DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n",
+ DP(BNX2X_MSG_DCB, "dcbx_features.pfc.enabled %x\n",
features->pfc.enabled);
- DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n",
+ DP(BNX2X_MSG_DCB, "dcbx_features.app.default_pri %x\n",
features->app.default_pri);
- DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n",
+ DP(BNX2X_MSG_DCB, "dcbx_features.app.tc_supported %x\n",
features->app.tc_supported);
- DP(NETIF_MSG_LINK, "dcbx_features.app.enabled %x\n",
+ DP(BNX2X_MSG_DCB, "dcbx_features.app.enabled %x\n",
features->app.enabled);
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_DCB,
"dcbx_features.app.app_pri_tbl[%x].app_id %x\n",
i, features->app.app_pri_tbl[i].app_id);
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_DCB,
"dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n",
i, features->app.app_pri_tbl[i].pri_bitmap);
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_DCB,
"dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n",
i, features->app.app_pri_tbl[i].appBitfield);
}
@@ -221,13 +201,16 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
- DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n");
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_ERROR\n");
if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH))
- DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_MISMATCH\n");
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_MISMATCH\n");
+ if (GET_FLAGS(error, DCBX_REMOTE_APP_TLV_NOT_FOUND))
+ DP(BNX2X_MSG_DCB, "DCBX_REMOTE_APP_TLV_NOT_FOUND\n");
if (app->enabled &&
- !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH)) {
+ !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH |
+ DCBX_REMOTE_APP_TLV_NOT_FOUND)) {
bp->dcbx_port_params.app.enabled = true;
@@ -256,7 +239,7 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
LLFC_TRAFFIC_TYPE_ISCSI);
}
} else {
- DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n");
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_DISABLED\n");
bp->dcbx_port_params.app.enabled = false;
for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY;
@@ -276,8 +259,10 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
- DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n");
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ERROR\n");
+ if (GET_FLAGS(error, DCBX_REMOTE_ETS_TLV_NOT_FOUND))
+ DP(BNX2X_MSG_DCB, "DCBX_REMOTE_ETS_TLV_NOT_FOUND\n");
/* Clean up old settings of ets on COS */
for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) {
@@ -287,10 +272,10 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
cos_params[i].pri_bitmask = 0;
}
- if (bp->dcbx_port_params.app.enabled &&
- !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) &&
- ets->enabled) {
- DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n");
+ if (bp->dcbx_port_params.app.enabled && ets->enabled &&
+ !GET_FLAGS(error,
+ DCBX_LOCAL_ETS_ERROR | DCBX_REMOTE_ETS_TLV_NOT_FOUND)) {
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ENABLE\n");
bp->dcbx_port_params.ets.enabled = true;
bnx2x_dcbx_get_ets_pri_pg_tbl(bp,
@@ -305,7 +290,7 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
ets, pg_pri_orginal_spread);
} else {
- DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n");
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_DISABLED\n");
bp->dcbx_port_params.ets.enabled = false;
ets->pri_pg_tbl[0] = 0;
@@ -319,16 +304,18 @@ static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
{
if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
- DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n");
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_ERROR\n");
- if (bp->dcbx_port_params.app.enabled &&
- !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH) &&
- pfc->enabled) {
+ if (GET_FLAGS(error, DCBX_REMOTE_PFC_TLV_NOT_FOUND))
+ DP(BNX2X_MSG_DCB, "DCBX_REMOTE_PFC_TLV_NOT_FOUND\n");
+ if (bp->dcbx_port_params.app.enabled && pfc->enabled &&
+ !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH |
+ DCBX_REMOTE_PFC_TLV_NOT_FOUND)) {
bp->dcbx_port_params.pfc.enabled = true;
bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
~(pfc->pri_en_bitmap);
} else {
- DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n");
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_DISABLED\n");
bp->dcbx_port_params.pfc.enabled = false;
bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0;
}
@@ -352,7 +339,7 @@ static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) {
if (cos_params[i].pri_bitmask & nw_prio) {
/* extend the bitmask with unmapped */
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_DCB,
"cos %d extended with 0x%08x\n", i, unmapped);
cos_params[i].pri_bitmask |= unmapped;
break;
@@ -443,18 +430,18 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_TX_STOP;
- DP(NETIF_MSG_LINK, "STOP TRAFFIC\n");
+ DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n");
return bnx2x_func_state_change(bp, &func_params);
}
static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_tx_start_params *tx_params =
&func_params.params.tx_start;
@@ -463,7 +450,7 @@ static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
bnx2x_dcbx_fw_struct(bp, tx_params);
- DP(NETIF_MSG_LINK, "START TRAFFIC\n");
+ DP(BNX2X_MSG_DCB, "START TRAFFIC\n");
return bnx2x_func_state_change(bp, &func_params);
}
@@ -529,7 +516,7 @@ static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
/*
* In E3B0 the configuration may have more than 2 COS.
*/
-void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
+static void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
{
struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
struct bnx2x_ets_params ets_params = { 0 };
@@ -588,7 +575,7 @@ static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset);
int rc;
- DP(NETIF_MSG_LINK, "dcbx_remote_mib_offset 0x%x\n",
+ DP(BNX2X_MSG_DCB, "dcbx_remote_mib_offset 0x%x\n",
dcbx_remote_mib_offset);
if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) {
@@ -617,7 +604,7 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset);
int rc;
- DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
+ DP(BNX2X_MSG_DCB, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) {
BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
@@ -693,7 +680,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask
& (1 << prio)) {
bp->prio_to_cos[prio] = cos;
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_DCB,
"tx_mapping %d --> %d\n", prio, cos);
}
}
@@ -712,7 +699,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
switch (state) {
case BNX2X_DCBX_STATE_NEG_RECEIVED:
{
- DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
+ DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
#ifdef BCM_DCBNL
/**
* Delete app tlvs from dcbnl before reading new
@@ -762,7 +749,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
return;
}
case BNX2X_DCBX_STATE_TX_PAUSED:
- DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
+ DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_PAUSED\n");
bnx2x_pfc_set_pfc(bp);
bnx2x_dcbx_update_ets_params(bp);
@@ -770,7 +757,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
return;
case BNX2X_DCBX_STATE_TX_RELEASED:
- DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
+ DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n");
bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
#ifdef BCM_DCBNL
/*
@@ -861,7 +848,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i,
(u8)dp->admin_configuration_bw_precentage[i]);
- DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n",
+ DP(BNX2X_MSG_DCB, "pg_bw_tbl[%d] = %02x\n",
i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i));
}
@@ -869,7 +856,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i,
(u8)dp->admin_configuration_ets_pg[i]);
- DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n",
+ DP(BNX2X_MSG_DCB, "pri_pg_tbl[%d] = %02x\n",
i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
}
@@ -923,7 +910,7 @@ void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
bp->dcb_state = false;
bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
}
- DP(NETIF_MSG_LINK, "DCB state [%s:%s]\n",
+ DP(BNX2X_MSG_DCB, "DCB state [%s:%s]\n",
dcb_on ? "ON" : "OFF",
dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" :
dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" :
@@ -945,30 +932,30 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp)
bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
bp->dcbx_config_params.admin_ets_reco_valid = 1;
bp->dcbx_config_params.admin_app_priority_willing = 1;
- bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 00;
- bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50;
- bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 100;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 0;
bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0;
bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0;
bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0;
bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0;
bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0;
- bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1;
+ bp->dcbx_config_params.admin_configuration_ets_pg[0] = 0;
bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0;
bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0;
- bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2;
+ bp->dcbx_config_params.admin_configuration_ets_pg[3] = 0;
bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0;
bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0;
bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0;
bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0;
- bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0;
- bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1;
- bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 100;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 0;
bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0;
- bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7;
- bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5;
- bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6;
- bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 0;
bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0;
bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1;
bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2;
@@ -977,25 +964,12 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp)
bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5;
bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6;
bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7;
- bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */
- bp->dcbx_config_params.admin_priority_app_table[0].valid = 1;
- bp->dcbx_config_params.admin_priority_app_table[1].valid = 1;
+ bp->dcbx_config_params.admin_pfc_bitmap = 0x0;
+ bp->dcbx_config_params.admin_priority_app_table[0].valid = 0;
+ bp->dcbx_config_params.admin_priority_app_table[1].valid = 0;
bp->dcbx_config_params.admin_priority_app_table[2].valid = 0;
bp->dcbx_config_params.admin_priority_app_table[3].valid = 0;
- bp->dcbx_config_params.admin_priority_app_table[0].priority = 3;
- bp->dcbx_config_params.admin_priority_app_table[1].priority = 0;
- bp->dcbx_config_params.admin_priority_app_table[2].priority = 0;
- bp->dcbx_config_params.admin_priority_app_table[3].priority = 0;
- bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0;
- bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1;
- bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0;
- bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0;
- bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906;
- bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260;
- bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0;
- bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0;
- bp->dcbx_config_params.admin_default_priority =
- bp->dcbx_config_params.admin_priority_app_table[1].priority;
+ bp->dcbx_config_params.admin_default_priority = 0;
}
void bnx2x_dcbx_init(struct bnx2x *bp)
@@ -1011,7 +985,7 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
* the function is pmf
* shmem2 contains DCBX support fields
*/
- DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n",
+ DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n",
bp->dcb_state, bp->port.pmf);
if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
@@ -1019,7 +993,7 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
dcbx_lldp_params_offset =
SHMEM2_RD(bp, dcbx_lldp_params_offset);
- DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
+ DP(BNX2X_MSG_DCB, "dcbx_lldp_params_offset 0x%x\n",
dcbx_lldp_params_offset);
bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
@@ -1041,38 +1015,36 @@ bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
u8 pri = 0;
u8 cos = 0;
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_DCB,
"pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version);
- DP(NETIF_MSG_LINK,
- "pdev->params.dcbx_port_params.pfc."
- "priority_non_pauseable_mask %x\n",
+ DP(BNX2X_MSG_DCB,
+ "pdev->params.dcbx_port_params.pfc.priority_non_pauseable_mask %x\n",
bp->dcbx_port_params.pfc.priority_non_pauseable_mask);
for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) {
- DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
- "cos_params[%d].pri_bitmask %x\n", cos,
- bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask);
+ DP(BNX2X_MSG_DCB,
+ "pdev->params.dcbx_port_params.ets.cos_params[%d].pri_bitmask %x\n",
+ cos, bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask);
- DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
- "cos_params[%d].bw_tbl %x\n", cos,
- bp->dcbx_port_params.ets.cos_params[cos].bw_tbl);
+ DP(BNX2X_MSG_DCB,
+ "pdev->params.dcbx_port_params.ets.cos_params[%d].bw_tbl %x\n",
+ cos, bp->dcbx_port_params.ets.cos_params[cos].bw_tbl);
- DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
- "cos_params[%d].strict %x\n", cos,
- bp->dcbx_port_params.ets.cos_params[cos].strict);
+ DP(BNX2X_MSG_DCB,
+ "pdev->params.dcbx_port_params.ets.cos_params[%d].strict %x\n",
+ cos, bp->dcbx_port_params.ets.cos_params[cos].strict);
- DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
- "cos_params[%d].pauseable %x\n", cos,
- bp->dcbx_port_params.ets.cos_params[cos].pauseable);
+ DP(BNX2X_MSG_DCB,
+ "pdev->params.dcbx_port_params.ets.cos_params[%d].pauseable %x\n",
+ cos, bp->dcbx_port_params.ets.cos_params[cos].pauseable);
}
for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
- DP(NETIF_MSG_LINK,
- "pfc_fw_cfg->traffic_type_to_priority_cos[%d]."
- "priority %x\n", pri,
- pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority);
+ DP(BNX2X_MSG_DCB,
+ "pfc_fw_cfg->traffic_type_to_priority_cos[%d].priority %x\n",
+ pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority);
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_DCB,
"pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n",
pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos);
}
@@ -1119,7 +1091,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
help_data->num_of_pg++;
}
}
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_DCB,
"add_traf_type %d pg_found %s num_of_pg %d\n",
add_traf_type, (false == pg_found) ? "NO" : "YES",
help_data->num_of_pg);
@@ -1312,8 +1284,7 @@ static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
}
if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX)
- BNX2X_ERR("Invalid value for pri_join_mask -"
- " could not find a priority\n");
+ BNX2X_ERR("Invalid value for pri_join_mask - could not find a priority\n");
cos_data->data[0].pri_join_mask = pri_mask_without_pri;
cos_data->data[1].pri_join_mask = pri_tested;
@@ -1626,8 +1597,10 @@ static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp,
num_of_app_pri--;
}
- if (num_spread_of_entries)
+ if (num_spread_of_entries) {
+ BNX2X_ERR("Didn't succeed to spread strict priorities\n");
return -EINVAL;
+ }
return 0;
}
@@ -1675,8 +1648,7 @@ static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp,
if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) {
if (bnx2x_dcbx_join_pgs(bp, ets, help_data,
DCBX_COS_MAX_NUM_E3B0)) {
- BNX2X_ERR("Unable to reduce the number of PGs -"
- "we will disables ETS\n");
+ BNX2X_ERR("Unable to reduce the number of PGs - we will disables ETS\n");
bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data,
pri_join_mask);
return;
@@ -1776,24 +1748,24 @@ static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
if (p->pauseable &&
DCBX_PFC_PRI_GET_NON_PAUSE(bp,
p->pri_bitmask) != 0)
- BNX2X_ERR("Inconsistent config for "
- "pausable COS %d\n", i);
+ BNX2X_ERR("Inconsistent config for pausable COS %d\n",
+ i);
if (!p->pauseable &&
DCBX_PFC_PRI_GET_PAUSE(bp,
p->pri_bitmask) != 0)
- BNX2X_ERR("Inconsistent config for "
- "nonpausable COS %d\n", i);
+ BNX2X_ERR("Inconsistent config for nonpausable COS %d\n",
+ i);
}
}
if (p->pauseable)
- DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n",
+ DP(BNX2X_MSG_DCB, "COS %d PAUSABLE prijoinmask 0x%x\n",
i, cos_data.data[i].pri_join_mask);
else
- DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask "
- "0x%x\n",
- i, cos_data.data[i].pri_join_mask);
+ DP(BNX2X_MSG_DCB,
+ "COS %d NONPAUSABLE prijoinmask 0x%x\n",
+ i, cos_data.data[i].pri_join_mask);
}
bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos ;
@@ -1808,7 +1780,7 @@ static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i);
- DP(NETIF_MSG_LINK, "set_configuration_ets_pg[%d] = 0x%x\n",
+ DP(BNX2X_MSG_DCB, "set_configuration_ets_pg[%d] = 0x%x\n",
i, set_configuration_ets_pg[i]);
}
}
@@ -1904,14 +1876,14 @@ static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp)
static u8 bnx2x_dcbnl_get_state(struct net_device *netdev)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "state = %d\n", bp->dcb_state);
+ DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcb_state);
return bp->dcb_state;
}
static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
+ DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off");
bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
return 0;
@@ -1921,7 +1893,7 @@ static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
u8 *perm_addr)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "GET-PERM-ADDR\n");
+ DP(BNX2X_MSG_DCB, "GET-PERM-ADDR\n");
/* first the HW mac address */
memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
@@ -1938,7 +1910,7 @@ static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, pgid);
+ DP(BNX2X_MSG_DCB, "prio[%d] = %d\n", prio, pgid);
if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
return;
@@ -1963,7 +1935,7 @@ static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev,
int pgid, u8 bw_pct)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "pgid[%d] = %d\n", pgid, bw_pct);
+ DP(BNX2X_MSG_DCB, "pgid[%d] = %d\n", pgid, bw_pct);
if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
return;
@@ -1977,14 +1949,14 @@ static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio,
u8 up_map)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
+ DP(BNX2X_MSG_DCB, "Nothing to set; No RX support\n");
}
static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev,
int pgid, u8 bw_pct)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
+ DP(BNX2X_MSG_DCB, "Nothing to set; No RX support\n");
}
static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
@@ -1992,7 +1964,7 @@ static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
u8 *up_map)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "prio = %d\n", prio);
+ DP(BNX2X_MSG_DCB, "prio = %d\n", prio);
/**
 * bw_pct ignored - bandwidth percentage division between user
@@ -2018,7 +1990,7 @@ static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev,
int pgid, u8 *bw_pct)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "pgid = %d\n", pgid);
+ DP(BNX2X_MSG_DCB, "pgid = %d\n", pgid);
*bw_pct = 0;
@@ -2033,7 +2005,7 @@ static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio,
u8 *up_map)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
+ DP(BNX2X_MSG_DCB, "Nothing to get; No RX support\n");
*prio_type = *pgid = *bw_pct = *up_map = 0;
}
@@ -2042,7 +2014,7 @@ static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev,
int pgid, u8 *bw_pct)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
+ DP(BNX2X_MSG_DCB, "Nothing to get; No RX support\n");
*bw_pct = 0;
}
@@ -2051,7 +2023,7 @@ static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
u8 setting)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, setting);
+ DP(BNX2X_MSG_DCB, "prio[%d] = %d\n", prio, setting);
if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
return;
@@ -2066,7 +2038,7 @@ static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
u8 *setting)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "prio = %d\n", prio);
+ DP(BNX2X_MSG_DCB, "prio = %d\n", prio);
*setting = 0;
@@ -2081,21 +2053,21 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
struct bnx2x *bp = netdev_priv(netdev);
int rc = 0;
- DP(NETIF_MSG_LINK, "SET-ALL\n");
+ DP(BNX2X_MSG_DCB, "SET-ALL\n");
if (!bnx2x_dcbnl_set_valid(bp))
return 1;
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- netdev_err(bp->dev, "Handling parity error recovery. "
- "Try again later\n");
+ netdev_err(bp->dev,
+ "Handling parity error recovery. Try again later\n");
return 1;
}
if (netif_running(bp->dev)) {
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
rc = bnx2x_nic_load(bp, LOAD_NORMAL);
}
- DP(NETIF_MSG_LINK, "set_dcbx_params done (%d)\n", rc);
+ DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc);
if (rc)
return 1;
@@ -2134,22 +2106,25 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
*cap = BNX2X_DCBX_CAPS;
break;
default:
+			BNX2X_ERR("Invalid capability ID\n");
rval = -EINVAL;
break;
}
- } else
+ } else {
+ DP(BNX2X_MSG_DCB, "DCB disabled\n");
rval = -EINVAL;
+ }
- DP(NETIF_MSG_LINK, "capid %d:%x\n", capid, *cap);
+ DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap);
return rval;
}
-static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
+static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
{
struct bnx2x *bp = netdev_priv(netdev);
u8 rval = 0;
- DP(NETIF_MSG_LINK, "tcid %d\n", tcid);
+ DP(BNX2X_MSG_DCB, "tcid %d\n", tcid);
if (bp->dcb_state) {
switch (tcid) {
@@ -2162,26 +2137,29 @@ static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
DCBX_COS_MAX_NUM_E2;
break;
default:
+			BNX2X_ERR("Invalid TC-ID\n");
rval = -EINVAL;
break;
}
- } else
+ } else {
+ DP(BNX2X_MSG_DCB, "DCB disabled\n");
rval = -EINVAL;
+ }
return rval;
}
-static u8 bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
+static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "num tcs = %d; Not supported\n", num);
+ DP(BNX2X_MSG_DCB, "num tcs = %d; Not supported\n", num);
return -EINVAL;
}
static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
+ DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
if (!bp->dcb_state)
return 0;
@@ -2192,7 +2170,7 @@ static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
+ DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off");
if (!bnx2x_dcbnl_set_valid(bp))
return;
@@ -2269,9 +2247,11 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
bnx2x_admin_app_set_ent(
&bp->dcbx_config_params.admin_priority_app_table[ff],
idtype, idval, up);
- else
+ else {
/* app table is full */
+ BNX2X_ERR("Application table is too large\n");
return -EBUSY;
+ }
/* up configured, if not 0 make sure feature is enabled */
if (up)
@@ -2285,11 +2265,13 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "app_type %d, app_id %x, prio bitmap %d\n",
+ DP(BNX2X_MSG_DCB, "app_type %d, app_id %x, prio bitmap %d\n",
idtype, idval, up);
- if (!bnx2x_dcbnl_set_valid(bp))
+ if (!bnx2x_dcbnl_set_valid(bp)) {
+ DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
return -EINVAL;
+ }
/* verify idtype */
switch (idtype) {
@@ -2297,6 +2279,7 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
case DCB_APP_IDTYPE_PORTNUM:
break;
default:
+ DP(BNX2X_MSG_DCB, "Wrong ID type\n");
return -EINVAL;
}
return bnx2x_set_admin_app_up(bp, idtype, idval, up);
@@ -2318,13 +2301,13 @@ static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "state = %02x\n", state);
+ DP(BNX2X_MSG_DCB, "state = %02x\n", state);
/* set dcbx mode */
if ((state & BNX2X_DCBX_CAPS) != state) {
- BNX2X_ERR("Requested DCBX mode %x is beyond advertised "
- "capabilities\n", state);
+ BNX2X_ERR("Requested DCBX mode %x is beyond advertised capabilities\n",
+ state);
return 1;
}
@@ -2348,7 +2331,7 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
struct bnx2x *bp = netdev_priv(netdev);
u8 rval = 0;
- DP(NETIF_MSG_LINK, "featid %d\n", featid);
+ DP(BNX2X_MSG_DCB, "featid %d\n", featid);
if (bp->dcb_state) {
*flags = 0;
@@ -2374,11 +2357,14 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
*flags |= DCB_FEATCFG_ERROR;
break;
default:
+			BNX2X_ERR("Invalid feature-ID\n");
rval = -EINVAL;
break;
}
- } else
+ } else {
+ DP(BNX2X_MSG_DCB, "DCB disabled\n");
rval = -EINVAL;
+ }
return rval;
}
@@ -2389,7 +2375,7 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
struct bnx2x *bp = netdev_priv(netdev);
u8 rval = 0;
- DP(NETIF_MSG_LINK, "featid = %d flags = %02x\n", featid, flags);
+ DP(BNX2X_MSG_DCB, "featid = %d flags = %02x\n", featid, flags);
/* ignore the 'advertise' flag */
if (bnx2x_dcbnl_set_valid(bp)) {
@@ -2412,11 +2398,14 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
flags & DCB_FEATCFG_WILLING ? 1 : 0;
break;
default:
+			BNX2X_ERR("Invalid feature-ID\n");
rval = -EINVAL;
break;
}
- } else
+ } else {
+ DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
rval = -EINVAL;
+ }
return rval;
}
@@ -2427,7 +2416,7 @@ static int bnx2x_peer_appinfo(struct net_device *netdev,
int i;
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "APP-INFO\n");
+ DP(BNX2X_MSG_DCB, "APP-INFO\n");
info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0;
info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0;
@@ -2446,7 +2435,7 @@ static int bnx2x_peer_apptable(struct net_device *netdev,
int i, j;
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "APP-TABLE\n");
+ DP(BNX2X_MSG_DCB, "APP-TABLE\n");
for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
struct dcbx_app_priority_entry *ent =
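The bnx2x_dcb.c hunks above move every DCB trace from the generic NETIF_MSG_LINK level to a dedicated BNX2X_MSG_DCB mask, so DCB output can be enabled without also turning on link chatter. Below is a minimal, self-contained sketch of how such a mask-gated debug helper is typically wired; dbg_print, drv_priv, msg_enable and the mask values are illustrative assumptions, not the driver's actual definitions.

/* Editorial sketch: a per-subsystem debug mask gating a trace helper. */
#include <stdarg.h>
#include <stdio.h>

#define MSG_LINK 0x0001	/* assumed bit for link messages */
#define MSG_DCB  0x0800	/* assumed bit for DCB messages */

struct drv_priv {
	unsigned int msg_enable;	/* set from userspace, e.g. via ethtool msglvl */
};

static void dbg_print(struct drv_priv *bp, unsigned int mask,
		      const char *fmt, ...)
{
	va_list ap;

	if (!(bp->msg_enable & mask))	/* message class not enabled: stay silent */
		return;
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	struct drv_priv bp = { .msg_enable = MSG_DCB };

	dbg_print(&bp, MSG_DCB, "state = %d\n", 1);	/* printed */
	dbg_print(&bp, MSG_LINK, "link is up\n");	/* filtered out */
	return 0;
}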
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 2ab9254e2d5e..06c7a0435948 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -1,6 +1,6 @@
/* bnx2x_dcb.h: Broadcom Everest network driver.
*
- * Copyright 2009-2011 Broadcom Corporation
+ * Copyright 2009-2012 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
index b983825d0ee9..3e4cff9b1ebe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -1,6 +1,6 @@
/* bnx2x_dump.h: Broadcom Everest network driver.
*
- * Copyright (c) 2011 Broadcom Corporation
+ * Copyright (c) 2012 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 31a8b38ab15e..2cc0a1703970 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1,6 +1,6 @@
/* bnx2x_ethtool.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -175,7 +175,11 @@ static const struct {
{ STATS_OFFSET32(total_tpa_aggregated_frames_hi),
8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
{ STATS_OFFSET32(total_tpa_bytes_hi),
- 8, STATS_FLAGS_FUNC, "tpa_bytes"}
+ 8, STATS_FLAGS_FUNC, "tpa_bytes"},
+ { STATS_OFFSET32(recoverable_error),
+ 4, STATS_FLAGS_FUNC, "recoverable_errors" },
+ { STATS_OFFSET32(unrecoverable_error),
+ 4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
@@ -218,20 +222,23 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
(SUPPORTED_TP | SUPPORTED_FIBRE));
cmd->advertising = bp->port.advertising[cfg_idx];
- if ((bp->state == BNX2X_STATE_OPEN) &&
- !(bp->flags & MF_FUNC_DIS) &&
- (bp->link_vars.link_up)) {
- ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
- cmd->duplex = bp->link_vars.duplex;
+ if ((bp->state == BNX2X_STATE_OPEN) && (bp->link_vars.link_up)) {
+ if (!(bp->flags & MF_FUNC_DIS)) {
+ ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
+ cmd->duplex = bp->link_vars.duplex;
+ } else {
+ ethtool_cmd_speed_set(
+ cmd, bp->link_params.req_line_speed[cfg_idx]);
+ cmd->duplex = bp->link_params.req_duplex[cfg_idx];
+ }
+
+ if (IS_MF(bp) && !BP_NOMCP(bp))
+ ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
} else {
- ethtool_cmd_speed_set(
- cmd, bp->link_params.req_line_speed[cfg_idx]);
- cmd->duplex = bp->link_params.req_duplex[cfg_idx];
+ cmd->duplex = DUPLEX_UNKNOWN;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
}
- if (IS_MF(bp))
- ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
-
cmd->port = bnx2x_get_port_type(bp);
cmd->phy_address = bp->mdio.prtad;
@@ -242,10 +249,38 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
else
cmd->autoneg = AUTONEG_DISABLE;
+ /* Publish LP advertised speeds and FC */
+ if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ u32 status = bp->link_vars.link_status;
+
+ cmd->lp_advertising |= ADVERTISED_Autoneg;
+ if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE)
+ cmd->lp_advertising |= ADVERTISED_Pause;
+ if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
+ cmd->lp_advertising |= ADVERTISED_Asym_Pause;
+
+ if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_10baseT_Half;
+ if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_10baseT_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_100baseT_Half;
+ if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_100baseT_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_1000baseT_Half;
+ if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_1000baseT_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
+ }
+
cmd->maxtxpkt = 0;
cmd->maxrxpkt = 0;
- DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+ DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
" supported 0x%x advertising 0x%x speed %u\n"
" duplex %d port %d phy_address %d transceiver %d\n"
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
@@ -266,7 +301,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (IS_MF_SD(bp))
return 0;
- DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+ DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
" supported 0x%x advertising 0x%x speed %u\n"
" duplex %d port %d phy_address %d transceiver %d\n"
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
@@ -277,6 +312,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
speed = ethtool_cmd_speed(cmd);
+	/* If we received a request for an unknown duplex, assume full */
+ if (cmd->duplex == DUPLEX_UNKNOWN)
+ cmd->duplex = DUPLEX_FULL;
+
if (IS_MF_SI(bp)) {
u32 part;
u32 line_speed = bp->link_vars.line_speed;
@@ -286,18 +325,17 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
line_speed = 10000;
if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
- BNX2X_DEV_INFO("To set speed BC %X or higher "
- "is required, please upgrade BC\n",
- REQ_BC_VER_4_SET_MF_BW);
+ DP(BNX2X_MSG_ETHTOOL,
+ "To set speed BC %X or higher is required, please upgrade BC\n",
+ REQ_BC_VER_4_SET_MF_BW);
return -EINVAL;
}
part = (speed * 100) / line_speed;
if (line_speed < speed || !part) {
- BNX2X_DEV_INFO("Speed setting should be in a range "
- "from 1%% to 100%% "
- "of actual line speed\n");
+ DP(BNX2X_MSG_ETHTOOL,
+ "Speed setting should be in a range from 1%% to 100%% of actual line speed\n");
return -EINVAL;
}
@@ -319,7 +357,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (!(bp->port.supported[0] & SUPPORTED_TP ||
bp->port.supported[1] & SUPPORTED_TP)) {
- DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
return -EINVAL;
}
bp->link_params.multi_phy_config &=
@@ -339,7 +377,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
bp->port.supported[1] & SUPPORTED_FIBRE)) {
- DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
return -EINVAL;
}
bp->link_params.multi_phy_config &=
@@ -353,7 +391,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
break;
default:
- DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
return -EINVAL;
}
	/* Save new config in case command completes successfully */
@@ -362,7 +400,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cfg_idx = bnx2x_get_link_cfg_idx(bp);
/* Restore old config in case command failed */
bp->link_params.multi_phy_config = old_multi_phy_config;
- DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
+ DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx);
if (cmd->autoneg == AUTONEG_ENABLE) {
u32 an_supported_speed = bp->port.supported[cfg_idx];
@@ -371,14 +409,14 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
an_supported_speed |= (SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full);
if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
- DP(NETIF_MSG_LINK, "Autoneg not supported\n");
+ DP(BNX2X_MSG_ETHTOOL, "Autoneg not supported\n");
return -EINVAL;
}
/* advertise the requested speed and duplex if supported */
if (cmd->advertising & ~an_supported_speed) {
- DP(NETIF_MSG_LINK, "Advertisement parameters "
- "are not supported\n");
+ DP(BNX2X_MSG_ETHTOOL,
+ "Advertisement parameters are not supported\n");
return -EINVAL;
}
@@ -427,7 +465,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (cmd->duplex == DUPLEX_FULL) {
if (!(bp->port.supported[cfg_idx] &
SUPPORTED_10baseT_Full)) {
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_ETHTOOL,
"10M full not supported\n");
return -EINVAL;
}
@@ -437,7 +475,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
} else {
if (!(bp->port.supported[cfg_idx] &
SUPPORTED_10baseT_Half)) {
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_ETHTOOL,
"10M half not supported\n");
return -EINVAL;
}
@@ -451,7 +489,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (cmd->duplex == DUPLEX_FULL) {
if (!(bp->port.supported[cfg_idx] &
SUPPORTED_100baseT_Full)) {
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_ETHTOOL,
"100M full not supported\n");
return -EINVAL;
}
@@ -461,7 +499,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
} else {
if (!(bp->port.supported[cfg_idx] &
SUPPORTED_100baseT_Half)) {
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_ETHTOOL,
"100M half not supported\n");
return -EINVAL;
}
@@ -473,13 +511,15 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
case SPEED_1000:
if (cmd->duplex != DUPLEX_FULL) {
- DP(NETIF_MSG_LINK, "1G half not supported\n");
+ DP(BNX2X_MSG_ETHTOOL,
+ "1G half not supported\n");
return -EINVAL;
}
if (!(bp->port.supported[cfg_idx] &
SUPPORTED_1000baseT_Full)) {
- DP(NETIF_MSG_LINK, "1G full not supported\n");
+ DP(BNX2X_MSG_ETHTOOL,
+ "1G full not supported\n");
return -EINVAL;
}
@@ -489,14 +529,14 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
case SPEED_2500:
if (cmd->duplex != DUPLEX_FULL) {
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_ETHTOOL,
"2.5G half not supported\n");
return -EINVAL;
}
if (!(bp->port.supported[cfg_idx]
& SUPPORTED_2500baseX_Full)) {
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_ETHTOOL,
"2.5G full not supported\n");
return -EINVAL;
}
@@ -507,13 +547,15 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
case SPEED_10000:
if (cmd->duplex != DUPLEX_FULL) {
- DP(NETIF_MSG_LINK, "10G half not supported\n");
+ DP(BNX2X_MSG_ETHTOOL,
+ "10G half not supported\n");
return -EINVAL;
}
if (!(bp->port.supported[cfg_idx]
& SUPPORTED_10000baseT_Full)) {
- DP(NETIF_MSG_LINK, "10G full not supported\n");
+ DP(BNX2X_MSG_ETHTOOL,
+ "10G full not supported\n");
return -EINVAL;
}
@@ -522,7 +564,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
default:
- DP(NETIF_MSG_LINK, "Unsupported speed %u\n", speed);
+ DP(BNX2X_MSG_ETHTOOL, "Unsupported speed %u\n", speed);
return -EINVAL;
}
@@ -531,7 +573,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bp->port.advertising[cfg_idx] = advertising;
}
- DP(NETIF_MSG_LINK, "req_line_speed %d\n"
+ DP(BNX2X_MSG_ETHTOOL, "req_line_speed %d\n"
" req_duplex %d advertising 0x%x\n",
bp->link_params.req_line_speed[cfg_idx],
bp->link_params.req_duplex[cfg_idx],
@@ -774,14 +816,8 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
phy_fw_ver[0] = '\0';
- if (bp->port.pmf) {
- bnx2x_acquire_phy_lock(bp);
- bnx2x_get_ext_phy_fw_version(&bp->link_params,
- (bp->state != BNX2X_STATE_CLOSED),
- phy_fw_ver, PHY_FW_VER_LEN);
- bnx2x_release_phy_lock(bp);
- }
-
+ bnx2x_get_ext_phy_fw_version(&bp->link_params,
+ phy_fw_ver, PHY_FW_VER_LEN);
strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version));
snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
"bc %d.%d.%d%s%s",
@@ -817,13 +853,16 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bnx2x *bp = netdev_priv(dev);
- if (wol->wolopts & ~WAKE_MAGIC)
+ if (wol->wolopts & ~WAKE_MAGIC) {
+		DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
return -EINVAL;
+ }
if (wol->wolopts & WAKE_MAGIC) {
- if (bp->flags & NO_WOL_FLAG)
+ if (bp->flags & NO_WOL_FLAG) {
+			DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
return -EINVAL;
-
+ }
bp->wol = 1;
} else
bp->wol = 0;
@@ -882,11 +921,27 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
return bp->common.flash_size;
}
+/* Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
+ * had we done things the other way around, two pfs from the same port could
+ * attempt to access nvram at the same time and run into a scenario such as:
+ * pf A takes the port lock.
+ * pf B succeeds in taking the same lock since they are from the same port.
+ * pf A takes the per pf misc lock. Performs eeprom access.
+ * pf A finishes. Unlocks the per pf misc lock.
+ * pf B takes the lock and proceeds to perform its own access.
+ * pf A unlocks the per port lock, while pf B is still working (!).
+ * mcp takes the per port lock and corrupts pf B's access (and/or has its own
+ * access corrupted by pf B).
+ */
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
int port = BP_PORT(bp);
int count, i;
- u32 val = 0;
+ u32 val;
+
+ /* acquire HW lock: protect against other PFs in PF Direct Assignment */
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
/* adjust timeout for emulation/FPGA */
count = BNX2X_NVRAM_TIMEOUT_COUNT;
@@ -906,7 +961,8 @@ static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
}
if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
- DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot get access to nvram interface\n");
return -EBUSY;
}
@@ -917,7 +973,7 @@ static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
int port = BP_PORT(bp);
int count, i;
- u32 val = 0;
+ u32 val;
/* adjust timeout for emulation/FPGA */
count = BNX2X_NVRAM_TIMEOUT_COUNT;
@@ -937,10 +993,13 @@ static int bnx2x_release_nvram_lock(struct bnx2x *bp)
}
if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
- DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot free access to nvram interface\n");
return -EBUSY;
}
+ /* release HW lock: protect against other PFs in PF Direct Assignment */
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
return 0;
}
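The comment added to bnx2x_acquire_nvram_lock above describes a strict nesting: the per-pf misc HW lock is taken before the per-port MCP software arbitration and released after it, so the port arbitration can never appear free while another pf is still in the middle of an nvram access. A minimal sketch of that nesting follows, with pthread mutexes standing in for the two locks; the helper names are assumptions, not driver API.

/* Editorial sketch of the lock nesting described in the comment above. */
#include <pthread.h>

static pthread_mutex_t per_pf_misc_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t per_port_arb_lock = PTHREAD_MUTEX_INITIALIZER;

static void nvram_access(void)
{
	/* 1. per-pf misc lock first: serializes pfs that share the port */
	pthread_mutex_lock(&per_pf_misc_lock);
	/* 2. then the per-port arbitration that the mcp also honours */
	pthread_mutex_lock(&per_port_arb_lock);

	/* ... nvram/eeprom access happens here ... */

	/* release in reverse order, mirroring acquire/release_nvram_lock */
	pthread_mutex_unlock(&per_port_arb_lock);
	pthread_mutex_unlock(&per_pf_misc_lock);
}

int main(void)
{
	nvram_access();
	return 0;
}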
@@ -1009,7 +1068,9 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
break;
}
}
-
+ if (rc == -EBUSY)
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "nvram read timeout expired\n");
return rc;
}
@@ -1021,15 +1082,15 @@ static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
__be32 val;
if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
- DP(BNX2X_MSG_NVM,
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"Invalid parameter: offset 0x%x buf_size 0x%x\n",
offset, buf_size);
return -EINVAL;
}
if (offset + buf_size > bp->common.flash_size) {
- DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
- " buf_size (0x%x) > flash_size (0x%x)\n",
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
offset, buf_size, bp->common.flash_size);
return -EINVAL;
}
@@ -1074,10 +1135,13 @@ static int bnx2x_get_eeprom(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
int rc;
- if (!netif_running(dev))
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
return -EAGAIN;
+ }
- DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
" magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
eeprom->len, eeprom->len);
@@ -1126,6 +1190,9 @@ static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
}
}
+ if (rc == -EBUSY)
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "nvram write timeout expired\n");
return rc;
}
@@ -1140,8 +1207,8 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
__be32 val;
if (offset + buf_size > bp->common.flash_size) {
- DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
- " buf_size (0x%x) > flash_size (0x%x)\n",
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
offset, buf_size, bp->common.flash_size);
return -EINVAL;
}
@@ -1189,15 +1256,15 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
- DP(BNX2X_MSG_NVM,
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"Invalid parameter: offset 0x%x buf_size 0x%x\n",
offset, buf_size);
return -EINVAL;
}
if (offset + buf_size > bp->common.flash_size) {
- DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
- " buf_size (0x%x) > flash_size (0x%x)\n",
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
offset, buf_size, bp->common.flash_size);
return -EINVAL;
}
@@ -1245,10 +1312,13 @@ static int bnx2x_set_eeprom(struct net_device *dev,
int port = BP_PORT(bp);
int rc = 0;
u32 ext_phy_config;
- if (!netif_running(dev))
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
return -EAGAIN;
+ }
- DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
" magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
eeprom->len, eeprom->len);
@@ -1257,8 +1327,11 @@ static int bnx2x_set_eeprom(struct net_device *dev,
/* PHY eeprom can be accessed only by the PMF */
if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
- !bp->port.pmf)
+ !bp->port.pmf) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "wrong magic or interface is not pmf\n");
return -EINVAL;
+ }
ext_phy_config =
SHMEM_RD(bp,
@@ -1370,7 +1443,8 @@ static int bnx2x_set_ringparam(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- pr_err("Handling parity error recovery. Try again later\n");
+ DP(BNX2X_MSG_ETHTOOL,
+ "Handling parity error recovery. Try again later\n");
return -EAGAIN;
}
@@ -1378,8 +1452,10 @@ static int bnx2x_set_ringparam(struct net_device *dev,
(ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
MIN_RX_SIZE_TPA)) ||
(ering->tx_pending > MAX_TX_AVAIL) ||
- (ering->tx_pending <= MAX_SKB_FRAGS + 4))
+ (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
+ DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EINVAL;
+ }
bp->rx_ring_size = ering->rx_pending;
bp->tx_ring_size = ering->tx_pending;
@@ -1392,15 +1468,22 @@ static void bnx2x_get_pauseparam(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ int cfg_reg;
+
epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
BNX2X_FLOW_CTRL_AUTO);
- epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
+ if (!epause->autoneg)
+ cfg_reg = bp->link_params.req_flow_ctrl[cfg_idx];
+ else
+ cfg_reg = bp->link_params.req_fc_auto_adv;
+
+ epause->rx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_RX) ==
BNX2X_FLOW_CTRL_RX);
- epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
+ epause->tx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_TX) ==
BNX2X_FLOW_CTRL_TX);
- DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+ DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
" autoneg %d rx_pause %d tx_pause %d\n",
epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}
@@ -1413,7 +1496,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
if (IS_MF(bp))
return 0;
- DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+ DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
" autoneg %d rx_pause %d tx_pause %d\n",
epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
@@ -1430,7 +1513,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
if (epause->autoneg) {
if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
- DP(NETIF_MSG_LINK, "autoneg not supported\n");
+ DP(BNX2X_MSG_ETHTOOL, "autoneg not supported\n");
return -EINVAL;
}
@@ -1440,7 +1523,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
}
}
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_ETHTOOL,
"req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
if (netif_running(dev)) {
@@ -1572,8 +1655,11 @@ static int bnx2x_test_registers(struct bnx2x *bp)
{ BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
};
- if (!netif_running(bp->dev))
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
return rc;
+ }
if (CHIP_IS_E1(bp))
hw = BNX2X_CHIP_MASK_E1;
@@ -1618,7 +1704,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
/* verify value is as expected */
if ((val & mask) != (wr_val & mask)) {
- DP(NETIF_MSG_HW,
+ DP(BNX2X_MSG_ETHTOOL,
"offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
offset, val, wr_val, mask);
goto test_reg_exit;
@@ -1672,8 +1758,11 @@ static int bnx2x_test_memory(struct bnx2x *bp)
{ NULL, 0xffffffff, {0, 0, 0, 0} }
};
- if (!netif_running(bp->dev))
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
return rc;
+ }
if (CHIP_IS_E1(bp))
index = BNX2X_CHIP_E1_OFST;
@@ -1688,7 +1777,7 @@ static int bnx2x_test_memory(struct bnx2x *bp)
for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
val = REG_RD(bp, prty_tbl[i].offset);
if (val & ~(prty_tbl[i].hw_mask[index])) {
- DP(NETIF_MSG_HW,
+ DP(BNX2X_MSG_ETHTOOL,
"%s is 0x%x\n", prty_tbl[i].name, val);
goto test_mem_exit;
}
@@ -1703,7 +1792,7 @@ static int bnx2x_test_memory(struct bnx2x *bp)
for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
val = REG_RD(bp, prty_tbl[i].offset);
if (val & ~(prty_tbl[i].hw_mask[index])) {
- DP(NETIF_MSG_HW,
+ DP(BNX2X_MSG_ETHTOOL,
"%s is 0x%x\n", prty_tbl[i].name, val);
goto test_mem_exit;
}
@@ -1724,7 +1813,7 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
msleep(20);
if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
- DP(NETIF_MSG_LINK, "Timeout waiting for link up\n");
+ DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");
}
}
@@ -1774,6 +1863,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
break;
default:
+ DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EINVAL;
}
@@ -1782,6 +1872,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
if (!skb) {
+ DP(BNX2X_MSG_ETHTOOL, "Can't allocate skb\n");
rc = -ENOMEM;
goto test_loopback_exit;
}
@@ -1796,7 +1887,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
rc = -ENOMEM;
dev_kfree_skb(skb);
- BNX2X_ERR("Unable to map SKB\n");
+ DP(BNX2X_MSG_ETHTOOL, "Unable to map SKB\n");
goto test_loopback_exit;
}
@@ -1879,7 +1970,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
goto test_loopback_rx_exit;
- len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
+ len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
if (len != pkt_size)
goto test_loopback_rx_exit;
@@ -1926,13 +2017,13 @@ static int bnx2x_test_loopback(struct bnx2x *bp)
res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
if (res) {
- DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
+ DP(BNX2X_MSG_ETHTOOL, " PHY loopback failed (res %d)\n", res);
rc |= BNX2X_PHY_LOOPBACK_FAILED;
}
res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
if (res) {
- DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
+ DP(BNX2X_MSG_ETHTOOL, " MAC loopback failed (res %d)\n", res);
rc |= BNX2X_MAC_LOOPBACK_FAILED;
}
@@ -1958,23 +2049,33 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
{ 0x708, 0x70 }, /* manuf_key_info */
{ 0, 0 }
};
- __be32 buf[0x350 / 4];
- u8 *data = (u8 *)buf;
+ __be32 *buf;
+ u8 *data;
int i, rc;
u32 magic, crc;
if (BP_NOMCP(bp))
return 0;
+ buf = kmalloc(0x350, GFP_KERNEL);
+ if (!buf) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n");
+ rc = -ENOMEM;
+ goto test_nvram_exit;
+ }
+ data = (u8 *)buf;
+
rc = bnx2x_nvram_read(bp, 0, data, 4);
if (rc) {
- DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "magic value read (rc %d)\n", rc);
goto test_nvram_exit;
}
magic = be32_to_cpu(buf[0]);
if (magic != 0x669955aa) {
- DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "wrong magic value (0x%08x)\n", magic);
rc = -ENODEV;
goto test_nvram_exit;
}
@@ -1984,31 +2085,35 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
nvram_tbl[i].size);
if (rc) {
- DP(NETIF_MSG_PROBE,
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"nvram_tbl[%d] read data (rc %d)\n", i, rc);
goto test_nvram_exit;
}
crc = ether_crc_le(nvram_tbl[i].size, data);
if (crc != CRC32_RESIDUAL) {
- DP(NETIF_MSG_PROBE,
- "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "nvram_tbl[%d] wrong crc value (0x%08x)\n", i, crc);
rc = -ENODEV;
goto test_nvram_exit;
}
}
test_nvram_exit:
+ kfree(buf);
return rc;
}
/* Send an EMPTY ramrod on the first queue */
static int bnx2x_test_intr(struct bnx2x *bp)
{
- struct bnx2x_queue_state_params params = {0};
+ struct bnx2x_queue_state_params params = {NULL};
- if (!netif_running(bp->dev))
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
return -ENODEV;
+ }
params.q_obj = &bp->fp->q_obj;
params.cmd = BNX2X_Q_CMD_EMPTY;
@@ -2024,7 +2129,8 @@ static void bnx2x_self_test(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
u8 is_serdes;
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- pr_err("Handling parity error recovery. Try again later\n");
+ netdev_err(bp->dev,
+ "Handling parity error recovery. Try again later\n");
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
@@ -2237,11 +2343,16 @@ static int bnx2x_set_phys_id(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
- if (!netif_running(dev))
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
return -EAGAIN;
+ }
- if (!bp->port.pmf)
+ if (!bp->port.pmf) {
+ DP(BNX2X_MSG_ETHTOOL, "Interface is not pmf\n");
return -EOPNOTSUPP;
+ }
switch (state) {
case ETHTOOL_ID_ACTIVE:
@@ -2278,6 +2389,7 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return 0;
default:
+ DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EOPNOTSUPP;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 998652a1b858..cd6dfa9eaa3a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -1,6 +1,6 @@
/* bnx2x_fw_defs.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -243,18 +243,6 @@
(IRO[48].base + ((funcId) * IRO[48].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
-/**
-* This file defines HSI constants for the ETH flow
-*/
-#ifdef _EVEREST_MICROCODE
-#include "Microcode\Generated\DataTypes\eth_rx_bd.h"
-#include "Microcode\Generated\DataTypes\eth_tx_bd.h"
-#include "Microcode\Generated\DataTypes\eth_rx_cqe.h"
-#include "Microcode\Generated\DataTypes\eth_rx_sge.h"
-#include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h"
-#endif
-
-
/* Ethernet Ring parameters */
#define X_ETH_LOCAL_RING_SIZE 13
#define FIRST_BD_IN_PKT 0
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index f4a07fbaed05..4bed52ba300d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -1,6 +1,6 @@
/* bnx2x_fw_file_hdr.h: FW binary file header structure.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 3e30c8642c26..5d71b7d43237 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1,6 +1,6 @@
/* bnx2x_hsi.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -34,9 +34,10 @@ struct license_key {
};
-#define PORT_0 0
-#define PORT_1 1
-#define PORT_MAX 2
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_MAX 2
+#define NVM_PATH_MAX 2
/****************************************************************************
* Shared HW configuration *
@@ -618,12 +619,6 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
#define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
- /* Enable RJ45 magjack pair swapping on 10GBase-T PHY, 84833 only */
- #define PORT_HW_CFG_RJ45_PR_SWP_MASK 0x00400000
- #define PORT_HW_CFG_RJ45_PR_SWP_SHIFT 22
- #define PORT_HW_CFG_RJ45_PR_SWP_DISABLED 0x00000000
- #define PORT_HW_CFG_RJ45_PR_SWP_ENABLED 0x00400000
-
/* Determine the Serdes electrical interface */
#define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000
#define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24
@@ -898,11 +893,6 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
#define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000
#define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100
- #define PORT_FEAT_CFG_AUTOGREEN_MASK 0x00000200
- #define PORT_FEAT_CFG_AUTOGREEN_SHIFT 9
- #define PORT_FEAT_CFG_AUTOGREEN_DISABLED 0x00000000
- #define PORT_FEAT_CFG_AUTOGREEN_ENABLED 0x00000200
-
#define PORT_FEATURE_EN_SIZE_MASK 0x0f000000
#define PORT_FEATURE_EN_SIZE_SHIFT 24
#define PORT_FEATURE_WOL_ENABLED 0x01000000
@@ -1139,8 +1129,7 @@ struct shm_dev_info { /* size */
#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
-/* LED Blink rate that will achieve ~15.9Hz */
-#define LED_BLINK_RATE_VAL 480
+#define MFW_TRACE_SIGNATURE 0x54524342
/****************************************************************************
* Driver <-> FW Mailbox *
@@ -1407,7 +1396,7 @@ struct port_mf_cfg {
#define PORT_MF_CFG_E1HOV_TAG_SHIFT 0
#define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK
- u32 reserved[3];
+ u32 reserved[1];
};
@@ -1493,7 +1482,8 @@ struct func_ext_cfg {
struct mf_cfg {
struct shared_mf_cfg shared_mf_config; /* 0x4 */
- struct port_mf_cfg port_mf_config[PORT_MAX]; /* 0x10 * 2 = 0x20 */
+ /* 0x8*2*2=0x20 */
+ struct port_mf_cfg port_mf_config[NVM_PATH_MAX][PORT_MAX];
/* for all chips, there are 8 mf functions */
struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */
/*
@@ -1845,6 +1835,9 @@ struct lldp_local_mib {
#define DCBX_LOCAL_PFC_MISMATCH 0x00000010
#define DCBX_LOCAL_APP_MISMATCH 0x00000020
#define DCBX_REMOTE_MIB_ERROR 0x00000040
+ #define DCBX_REMOTE_ETS_TLV_NOT_FOUND 0x00000080
+ #define DCBX_REMOTE_PFC_TLV_NOT_FOUND 0x00000100
+ #define DCBX_REMOTE_APP_TLV_NOT_FOUND 0x00000200
struct dcbx_features features;
u32 suffix_seq_num;
};
@@ -2002,6 +1995,7 @@ struct shmem2_region {
#define DRV_INFO_CONTROL_VER_SHIFT 0
#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
+ u32 ibft_host_addr; /* initialized by option ROM */
};
@@ -2700,8 +2694,8 @@ union drv_info_to_mcp {
struct iscsi_stats_info iscsi_stat;
};
#define BCM_5710_FW_MAJOR_VERSION 7
-#define BCM_5710_FW_MINOR_VERSION 0
-#define BCM_5710_FW_REVISION_VERSION 29
+#define BCM_5710_FW_MINOR_VERSION 2
+#define BCM_5710_FW_REVISION_VERSION 16
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3308,8 +3302,10 @@ struct client_init_rx_data {
#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0
#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1)
#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
-#define CLIENT_INIT_RX_DATA_RESERVED5 (0x3F<<2)
-#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 2
+#define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2)
+#define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2
+#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3)
+#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3
u8 vmqueue_mode_en_flg;
u8 extra_data_over_sgl_en_flg;
u8 cache_line_alignment_log_size;
@@ -3324,7 +3320,7 @@ struct client_init_rx_data {
u8 outer_vlan_removal_enable_flg;
u8 status_block_id;
u8 rx_sb_index_number;
- u8 reserved0;
+ u8 dont_verify_rings_pause_thr_flg;
u8 max_tpa_queues;
u8 silent_vlan_removal_flg;
__le16 max_bytes_on_bd;
@@ -3657,7 +3653,7 @@ struct eth_fast_path_rx_cqe {
u8 placement_offset;
__le32 rss_hash_result;
__le16 vlan_tag;
- __le16 pkt_len;
+ __le16 pkt_len_or_gro_seg_len;
__le16 len_on_bd;
struct parsing_flags pars_flags;
union eth_sgl_or_raw_data sgl_or_raw_data;
@@ -4215,6 +4211,15 @@ enum set_mac_action_type {
/*
+ * Ethernet TPA Modes
+ */
+enum tpa_mode {
+ TPA_LRO,
+ TPA_GRO,
+ MAX_TPA_MODE};
+
+
+/*
* tpa update ramrod data
*/
struct tpa_update_ramrod_data {
@@ -4224,7 +4229,8 @@ struct tpa_update_ramrod_data {
u8 max_tpa_queues;
u8 max_sges_for_packet;
u8 complete_on_both_clients;
- __le16 reserved1;
+ u8 dont_verify_rings_pause_thr_flg;
+ u8 tpa_mode;
__le16 sge_buff_size;
__le16 max_agg_size;
__le32 sge_page_base_lo;
@@ -4447,13 +4453,13 @@ enum common_spqe_cmd_id {
RAMROD_CMD_ID_COMMON_UNUSED,
RAMROD_CMD_ID_COMMON_FUNCTION_START,
RAMROD_CMD_ID_COMMON_FUNCTION_STOP,
+ RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE,
RAMROD_CMD_ID_COMMON_CFC_DEL,
RAMROD_CMD_ID_COMMON_CFC_DEL_WB,
RAMROD_CMD_ID_COMMON_STAT_QUERY,
RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
RAMROD_CMD_ID_COMMON_START_TRAFFIC,
RAMROD_CMD_ID_COMMON_RESERVED1,
- RAMROD_CMD_ID_COMMON_RESERVED2,
MAX_COMMON_SPQE_CMD_ID
};
@@ -4733,8 +4739,8 @@ enum event_ring_opcode {
EVENT_RING_OPCODE_MALICIOUS_VF,
EVENT_RING_OPCODE_FORWARD_SETUP,
EVENT_RING_OPCODE_RSS_UPDATE_RULES,
+ EVENT_RING_OPCODE_FUNCTION_UPDATE,
EVENT_RING_OPCODE_RESERVED1,
- EVENT_RING_OPCODE_RESERVED2,
EVENT_RING_OPCODE_SET_MAC,
EVENT_RING_OPCODE_CLASSIFICATION_RULES,
EVENT_RING_OPCODE_FILTERS_RULES,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 4d748e77d1ac..29f5c3cca31a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -1,7 +1,7 @@
/* bnx2x_init.h: Broadcom Everest network driver.
 * Structures and macros needed during the initialization.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 7ec1724753ad..fe66d902dc62 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -2,7 +2,7 @@
* Static functions needed during the initialization.
* This file is "included" in bnx2x_main.c.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -69,12 +69,12 @@ static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len,
{
if (bp->dmae_ready)
bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
- else if (wb)
- /*
- * Wide bus registers with no dmae need to be written
- * using indirect write.
- */
+
+ /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
+ else if (wb && CHIP_IS_E1(bp))
bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
+
+ /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
else
bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
}
@@ -99,8 +99,14 @@ static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
{
if (bp->dmae_ready)
bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
- else
+
+ /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
+ else if (CHIP_IS_E1(bp))
bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
+
+ /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
+ else
+ bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
}
static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr,
@@ -177,8 +183,14 @@ static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr,
{
if (bp->dmae_ready)
VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
- else
+
+ /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
+ else if (CHIP_IS_E1(bp))
bnx2x_init_ind_wr(bp, addr, data, len);
+
+ /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
+ else
+ bnx2x_init_str_wr(bp, addr, data, len);
}
static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo,
@@ -840,25 +852,15 @@ static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
}
}
-static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
+static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count,
+ u32 base_reg, u32 reg)
{
int i;
- u32 wb_data[2];
-
- wb_data[0] = wb_data[1] = 0;
-
+ u32 wb_data[2] = {0, 0};
for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
- REG_WR(bp, QM_REG_BASEADDR + i*4,
+ REG_WR(bp, base_reg + i*4,
qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
- bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8,
- wb_data, 2);
-
- if (CHIP_IS_E1H(bp)) {
- REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4,
- qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
- bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
- wb_data, 2);
- }
+ bnx2x_init_wr_wb(bp, reg + i*8, wb_data, 2);
}
}
@@ -873,7 +875,12 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
case INITOP_INIT:
/* set in the init-value array */
case INITOP_SET:
- bnx2x_qm_set_ptr_table(bp, qm_cid_count);
+ bnx2x_qm_set_ptr_table(bp, qm_cid_count,
+ QM_REG_BASEADDR, QM_REG_PTRTBL);
+ if (CHIP_IS_E1H(bp))
+ bnx2x_qm_set_ptr_table(bp, qm_cid_count,
+ QM_REG_BASEADDR_EXT_A,
+ QM_REG_PTRTBL_EXT_A);
break;
case INITOP_CLEAR:
break;
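The three bnx2x_init_ops.h hunks above converge the wide-bus writers on a single fallback order: DMAE when it is ready, indirect writes only on E1 (where a BIOS-initiated ZLR can interrupt a wide-bus write), and plain string writes on later chips whose PXP root complex handles the ZLR. A minimal sketch of that shared dispatch is below; the struct, callback and helper names are illustrative assumptions, not driver API.

/* Editorial sketch of the shared write fallback order used above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef void (*wb_write_fn)(uint32_t addr, const uint32_t *data, uint32_t len);

struct wb_writer {
	bool dmae_ready;	/* DMAE engine initialized */
	bool is_e1;		/* E1-family chip */
	wb_write_fn dmae_write, indirect_write, string_write;
};

static void wide_bus_write(const struct wb_writer *w, uint32_t addr,
			   const uint32_t *data, uint32_t len)
{
	if (w->dmae_ready)
		w->dmae_write(addr, data, len);		/* preferred path */
	else if (w->is_e1)
		w->indirect_write(addr, data, len);	/* BIOS ZLR may interrupt wide-bus writes */
	else
		w->string_write(addr, data, len);	/* PXP handles BIOS ZLR on later chips */
}

static void dmae_w(uint32_t a, const uint32_t *d, uint32_t l) { (void)d; printf("dmae 0x%x/%u\n", a, l); }
static void ind_w(uint32_t a, const uint32_t *d, uint32_t l)  { (void)d; printf("indirect 0x%x/%u\n", a, l); }
static void str_w(uint32_t a, const uint32_t *d, uint32_t l)  { (void)d; printf("string 0x%x/%u\n", a, l); }

int main(void)
{
	uint32_t buf[2] = { 0, 0 };
	struct wb_writer e1 = { .dmae_ready = false, .is_e1 = true,
				.dmae_write = dmae_w, .indirect_write = ind_w,
				.string_write = str_w };

	wide_bus_write(&e1, 0x1000, buf, 2);	/* takes the indirect path */
	return 0;
}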
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 2091e5dbbcdd..beb4cdbdb6e1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
-/* Copyright 2008-2011 Broadcom Corporation
+/* Copyright 2008-2012 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -1612,6 +1612,9 @@ static void bnx2x_umac_enable(struct link_params *params,
if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE;
+ if (vars->duplex == DUPLEX_HALF)
+ val |= UMAC_COMMAND_CONFIG_REG_HD_ENA;
+
REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
udelay(50);
@@ -3635,45 +3638,50 @@ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
}
-static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
- struct bnx2x *bp = params->bp;
u16 ld_pause; /* local */
u16 lp_pause; /* link partner */
u16 pause_result;
- u8 ret = 0;
- /* read twice */
+ struct bnx2x *bp = params->bp;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
+ bnx2x_cl22_read(bp, phy, 0x4, &ld_pause);
+ bnx2x_cl22_read(bp, phy, 0x5, &lp_pause);
+ } else {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+ }
+ pause_result = (ld_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
+ pause_result |= (lp_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
+ DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", pause_result);
+ bnx2x_pause_resolve(vars, pause_result);
+}
+static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 ret = 0;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-
- if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
+ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
+		/* Update the advertised flow-control of LD/LP in AN */
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ bnx2x_ext_phy_update_adv_fc(phy, params, vars);
+ /* But set the flow-control result as the requested one */
vars->flow_ctrl = phy->req_flow_ctrl;
- else if (phy->req_line_speed != SPEED_AUTO_NEG)
+ } else if (phy->req_line_speed != SPEED_AUTO_NEG)
vars->flow_ctrl = params->req_fc_auto_adv;
else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
ret = 1;
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
- bnx2x_cl22_read(bp, phy,
- 0x4, &ld_pause);
- bnx2x_cl22_read(bp, phy,
- 0x5, &lp_pause);
- } else {
- bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV_PAUSE, &ld_pause);
- bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
- }
- pause_result = (ld_pause &
- MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
- pause_result |= (lp_pause &
- MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
- DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
- pause_result);
- bnx2x_pause_resolve(vars, pause_result);
+ bnx2x_ext_phy_update_adv_fc(phy, params, vars);
}
return ret;
}
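The refactor above moves the ld_pause/lp_pause register reads into bnx2x_ext_phy_update_adv_fc while keeping the pause_result packing consumed by bnx2x_pause_resolve: local advertisement bits land in [3:2], partner bits in [1:0]. A hedged worked example of that packing follows; it assumes MDIO_AN_REG_ADV_PAUSE_MASK covers the PAUSE/ASM_DIR bits at positions 10 and 11, which is what the >>8 and >>10 shifts suggest.

/* Editorial worked example of the pause_result packing; the mask value is an assumption. */
#include <stdint.h>
#include <stdio.h>

#define ADV_PAUSE_MASK	0x0c00	/* assumed: bit 10 = PAUSE, bit 11 = ASM_DIR */

static uint16_t pack_pause_result(uint16_t ld_pause, uint16_t lp_pause)
{
	uint16_t res;

	res  = (ld_pause & ADV_PAUSE_MASK) >> 8;	/* local advert   -> bits [3:2] */
	res |= (lp_pause & ADV_PAUSE_MASK) >> 10;	/* partner advert -> bits [1:0] */
	return res;
}

int main(void)
{
	uint16_t ld = 1u << 10;	/* local advertises symmetric PAUSE */
	uint16_t lp = 1u << 10;	/* partner advertises symmetric PAUSE */

	/* prints 0x5: both PAUSE bits set, both ASM_DIR bits clear */
	printf("pause_result = 0x%x\n", pack_pause_result(ld, lp));
	return 0;
}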
@@ -3785,7 +3793,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Enable Autoneg */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1000);
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
}
@@ -5216,22 +5224,69 @@ static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
return 0;
}
+static void bnx2x_update_adv_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u32 gp_status)
+{
+ u16 ld_pause; /* local driver */
+ u16 lp_pause; /* link partner */
+ u16 pause_result;
+ struct bnx2x *bp = params->bp;
+ if ((gp_status &
+ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+ MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
+ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+ MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_LP_ADV1,
+ &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) >> 8;
+ pause_result |= (lp_pause &
+ MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK) >> 10;
+ DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", pause_result);
+ } else {
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+ &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
+ pause_result |= (lp_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
+ DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", pause_result);
+ }
+ bnx2x_pause_resolve(vars, pause_result);
+
+}
+
static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars,
u32 gp_status)
{
struct bnx2x *bp = params->bp;
- u16 ld_pause; /* local driver */
- u16 lp_pause; /* link partner */
- u16 pause_result;
-
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
/* resolve from gp_status in case of AN complete and not sgmii */
- if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
+ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
+		/* Update the advertised flow-control of LD/LP in AN */
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ bnx2x_update_adv_fc(phy, params, vars, gp_status);
+ /* But set the flow-control result as the requested one */
vars->flow_ctrl = phy->req_flow_ctrl;
- else if (phy->req_line_speed != SPEED_AUTO_NEG)
+ } else if (phy->req_line_speed != SPEED_AUTO_NEG)
vars->flow_ctrl = params->req_fc_auto_adv;
else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
(!(vars->phy_flags & PHY_SGMII_FLAG))) {
@@ -5239,45 +5294,7 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
vars->flow_ctrl = params->req_fc_auto_adv;
return;
}
- if ((gp_status &
- (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
- MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
- (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
- MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
-
- CL22_RD_OVER_CL45(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV1,
- &ld_pause);
- CL22_RD_OVER_CL45(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_LP_ADV1,
- &lp_pause);
- pause_result = (ld_pause &
- MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
- >> 8;
- pause_result |= (lp_pause &
- MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK)
- >> 10;
- DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
- pause_result);
- } else {
- CL22_RD_OVER_CL45(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
- &ld_pause);
- CL22_RD_OVER_CL45(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
- &lp_pause);
- pause_result = (ld_pause &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
- pause_result |= (lp_pause &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
- DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
- pause_result);
- }
- bnx2x_pause_resolve(vars, pause_result);
+ bnx2x_update_adv_fc(phy, params, vars, gp_status);
}
DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
}
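Editor's note: after this rework, bnx2x_flow_ctrl_resolve() follows a simple precedence, restated in the sketch below (enum values and helpers are placeholders, not driver symbols): a forced setting always wins, though the LD/LP advertisement is still recorded when autoneg ran so it can be reported; a non-autoneg link falls back to the preconfigured auto-adv default; only a completed, non-SGMII autonegotiation is resolved from the advertisements.

#include <stdbool.h>

enum fc { FC_AUTO, FC_NONE, FC_RX, FC_TX, FC_BOTH };

static enum fc resolve_from_adv(void) { return FC_BOTH; }	/* stub */

static enum fc resolve_fc(enum fc requested, bool autoneg, bool an_done,
			  bool sgmii, enum fc auto_adv_default)
{
	if (requested != FC_AUTO)
		return requested;		/* forced setting wins */
	if (!autoneg || !an_done || sgmii)
		return auto_adv_default;	/* nothing usable negotiated */
	return resolve_from_adv();		/* LD/LP advertisement result */
}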
@@ -5496,6 +5513,33 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
}
}
+ /* Read LP advertised speeds*/
+ if (SINGLE_MEDIA_DIRECT(params) &&
+ (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)) {
+ u16 val;
+
+ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_LP_ADV2, &val);
+
+ if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX)
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 |
+ MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_LP_UP1, &val);
+
+ if (val & MDIO_OVER_1G_UP1_2_5G)
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE;
+ if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ }
+
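Editor's note: this hunk, and the similar ones added to bnx2x_warpcore_read_status(), bnx2x_8073_read_status(), bnx2x_848xx_read_status(), bnx2x_54618se_read_status() and bnx2x_7101_read_status() further down, all follow one pattern: read a link-partner ability register and translate set bits into LINK_STATUS_LINK_PARTNER_*_CAPABLE flags. A table-driven version of that pattern could look like the sketch below (struct and function names invented); the open-coded if-chains in the patch keep each PHY's register layout explicit instead.

#include <linux/types.h>

struct lp_adv_map {
	u16 adv_bit;		/* bit in the ability register */
	u32 link_status_flag;	/* LINK_STATUS_LINK_PARTNER_* flag */
};

static u32 map_lp_adv(u16 reg_val, const struct lp_adv_map *tbl, int n)
{
	u32 flags = 0;
	int i;

	for (i = 0; i < n; i++)
		if (reg_val & tbl[i].adv_bit)
			flags |= tbl[i].link_status_flag;
	return flags;
}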
DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
vars->duplex, vars->flow_ctrl, vars->link_status);
return rc;
@@ -5553,6 +5597,34 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
}
}
+ if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) &&
+ SINGLE_MEDIA_DIRECT(params)) {
+ u16 val;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG2, &val);
+
+ if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX)
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 |
+ MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL3_LP_UP1, &val);
+
+ if (val & MDIO_OVER_1G_UP1_2_5G)
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE;
+ if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ }
+
+
if (lane < 2) {
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed);
@@ -5970,8 +6042,8 @@ static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
return 0;
}
-int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
- u8 *version, u16 len)
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
+ u16 len)
{
struct bnx2x *bp;
u32 spirom_ver = 0;
@@ -6418,7 +6490,9 @@ static int bnx2x_update_link_down(struct link_params *params,
LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
- LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK);
+ LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK |
+ LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE |
+ LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE);
vars->line_speed = 0;
bnx2x_update_mng(params, vars->link_status);
@@ -7367,6 +7441,19 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
bnx2x_8073_resolve_fc(phy, params, vars);
vars->duplex = DUPLEX_FULL;
}
+
+ if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG2, &val1);
+
+ if (val1 & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ if (val1 & (1<<7))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ }
+
return link_up;
}
@@ -9405,13 +9492,8 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
- u16 tmp_req_line_speed;
- tmp_req_line_speed = phy->req_line_speed;
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
- if (phy->req_line_speed == SPEED_10000)
- phy->req_line_speed = SPEED_AUTO_NEG;
- } else {
+ if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
/* Save spirom version */
bnx2x_save_848xx_spirom_version(phy, bp, params->port);
}
@@ -9555,8 +9637,6 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
1);
- phy->req_line_speed = tmp_req_line_speed;
-
return 0;
}
@@ -9948,6 +10028,42 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
vars->line_speed);
bnx2x_ext_phy_resolve_fc(phy, params, vars);
+
+ /* Read LP advertised speeds */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LP, &val);
+ if (val & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10THD_CAPABLE;
+ if (val & (1<<6))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE;
+ if (val & (1<<7))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE;
+ if (val & (1<<8))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE;
+ if (val & (1<<9))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100T4_CAPABLE;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_1000T_STATUS, &val);
+
+ if (val & (1<<10))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE;
+ if (val & (1<<11))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_MASTER_STATUS, &val);
+
+ if (val & (1<<11))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
}
return link_up;
@@ -10571,6 +10687,35 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
}
bnx2x_ext_phy_resolve_fc(phy, params, vars);
+
+ if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ /* report LP advertised speeds */
+ bnx2x_cl22_read(bp, phy, 0x5, &val);
+
+ if (val & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10THD_CAPABLE;
+ if (val & (1<<6))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE;
+ if (val & (1<<7))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE;
+ if (val & (1<<8))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE;
+ if (val & (1<<9))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100T4_CAPABLE;
+
+ bnx2x_cl22_read(bp, phy, 0xa, &val);
+ if (val & (1<<10))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE;
+ if (val & (1<<11))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ }
}
return link_up;
}
@@ -10699,6 +10844,11 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
val2, (val2 & (1<<14)));
bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
bnx2x_ext_phy_resolve_fc(phy, params, vars);
+
+ /* read LP advertised speeds */
+ if (val2 & (1<<11))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
}
return link_up;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index e02a68a7fb85..7ba557a610da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2011 Broadcom Corporation
+/* Copyright 2008-2012 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -337,8 +337,8 @@ int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
void bnx2x_link_status_update(struct link_params *input,
struct link_vars *output);
/* returns string representing the fw_version of the external phy */
-int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
- u8 *version, u16 len);
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
+ u16 len);
/* Set/Unset the led
Basically, the CLC takes care of the led for the link, but in case one needs
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b69f8762b339..f7f9aa807264 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1,6 +1,6 @@
/* bnx2x_main.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -375,9 +375,6 @@ void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
-
- DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
- idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
}
REG_WR(bp, dmae_reg_go_c[idx], 1);
}
@@ -442,10 +439,6 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
int rc = 0;
- DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
- bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
- bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
-
/*
* Lock the dmae channel. Disable BHs to prevent a dead-lock
* as long as this code is called both from syscall context and
@@ -462,9 +455,10 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
/* wait for completion */
udelay(5);
while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
- DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
- if (!cnt) {
+ if (!cnt ||
+ (bp->recovery_state != BNX2X_RECOVERY_DONE &&
+ bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
BNX2X_ERR("DMAE timeout!\n");
rc = DMAE_TIMEOUT;
goto unlock;
@@ -477,10 +471,6 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
rc = DMAE_PCI_ERROR;
}
- DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
- bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
- bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
-
unlock:
spin_unlock_bh(&bp->dmae_lock);
return rc;
@@ -494,9 +484,10 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
if (!bp->dmae_ready) {
u32 *data = bnx2x_sp(bp, wb_data[0]);
- DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
- " using indirect\n", dst_addr, len32);
- bnx2x_init_ind_wr(bp, dst_addr, data, len32);
+ if (CHIP_IS_E1(bp))
+ bnx2x_init_ind_wr(bp, dst_addr, data, len32);
+ else
+ bnx2x_init_str_wr(bp, dst_addr, data, len32);
return;
}
@@ -524,10 +515,13 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
u32 *data = bnx2x_sp(bp, wb_data[0]);
int i;
- DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
- " using indirect\n", src_addr, len32);
- for (i = 0; i < len32; i++)
- data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
+ if (CHIP_IS_E1(bp))
+ for (i = 0; i < len32; i++)
+ data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
+ else
+ for (i = 0; i < len32; i++)
+ data[i] = REG_RD(bp, src_addr + i*4);
+
return;
}
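Editor's note: the two hunks above change what bnx2x_write_dmae()/bnx2x_read_dmae() do while the DMAE block is not yet ready: E1 chips keep using the indirect register interface, later chips access the window directly. A minimal userspace-style sketch of that dispatch (the accessors are stubs standing in for bnx2x_reg_rd_ind() and REG_RD()):

#include <stdbool.h>
#include <stdint.h>

static uint32_t reg_rd_ind(uint32_t addr) { return addr; }	/* stub */
static uint32_t reg_rd(uint32_t addr)     { return addr; }	/* stub */

static void read_when_dmae_not_ready(bool chip_is_e1, uint32_t src_addr,
				     uint32_t *data, int len32)
{
	int i;

	for (i = 0; i < len32; i++)
		data[i] = chip_is_e1 ? reg_rd_ind(src_addr + i * 4)
				     : reg_rd(src_addr + i * 4);
}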
@@ -609,8 +603,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
XSTORM_ASSERT_LIST_OFFSET(i) + 12);
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
- " 0x%08x 0x%08x 0x%08x\n",
+ BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
i, row3, row2, row1, row0);
rc++;
} else {
@@ -637,8 +630,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
TSTORM_ASSERT_LIST_OFFSET(i) + 12);
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
- " 0x%08x 0x%08x 0x%08x\n",
+ BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
i, row3, row2, row1, row0);
rc++;
} else {
@@ -665,8 +657,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
CSTORM_ASSERT_LIST_OFFSET(i) + 12);
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
- " 0x%08x 0x%08x 0x%08x\n",
+ BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
i, row3, row2, row1, row0);
rc++;
} else {
@@ -693,8 +684,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
USTORM_ASSERT_LIST_OFFSET(i) + 12);
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
- " 0x%08x 0x%08x 0x%08x\n",
+ BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
i, row3, row2, row1, row0);
rc++;
} else {
@@ -723,13 +713,23 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
- printk("%s" "MCP PC at 0x%x\n", lvl, val);
+ BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
if (BP_PATH(bp) == 0)
trace_shmem_base = bp->common.shmem_base;
else
trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
- addr = trace_shmem_base - 0x0800 + 4;
+ addr = trace_shmem_base - 0x800;
+
+ /* validate TRCB signature */
+ mark = REG_RD(bp, addr);
+ if (mark != MFW_TRACE_SIGNATURE) {
+ BNX2X_ERR("Trace buffer signature is missing.");
+		return;
+ }
+
+ /* read cyclic buffer pointer */
+ addr += 4;
mark = REG_RD(bp, addr);
mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+ ((mark + 0x3) & ~0x3) - 0x08000000;
@@ -768,14 +768,14 @@ void bnx2x_panic_dump(struct bnx2x *bp)
#endif
bp->stats_state = STATS_STATE_DISABLED;
+ bp->eth_stats.unrecoverable_error++;
DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
BNX2X_ERR("begin crash dump -----------------\n");
/* Indices */
/* Common */
- BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
- " spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+ BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
bp->def_idx, bp->def_att_idx, bp->attn_state,
bp->spq_prod_idx, bp->stats_counter);
BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
@@ -822,14 +822,11 @@ void bnx2x_panic_dump(struct bnx2x *bp)
struct bnx2x_fp_txdata txdata;
/* Rx */
- BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
- " rx_comp_prod(0x%x)"
- " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
+ BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
i, fp->rx_bd_prod, fp->rx_bd_cons,
fp->rx_comp_prod,
fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
- BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
- " fp_hc_idx(0x%x)\n",
+ BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
fp->rx_sge_prod, fp->last_max_sge,
le16_to_cpu(fp->fp_hc_idx));
@@ -837,9 +834,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
for_each_cos_in_tx_queue(fp, cos)
{
txdata = fp->txdata[cos];
- BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
- " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
- " *tx_cons_sb(0x%x)\n",
+ BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
i, txdata.tx_pkt_prod,
txdata.tx_pkt_cons, txdata.tx_bd_prod,
txdata.tx_bd_cons,
@@ -881,9 +876,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
j * sizeof(u32));
if (!CHIP_IS_E1x(bp)) {
- pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) "
- "vnic_id(0x%x) same_igu_sb_1b(0x%x) "
- "state(0x%x)\n",
+ pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
sb_data_e2.common.p_func.pf_id,
sb_data_e2.common.p_func.vf_id,
sb_data_e2.common.p_func.vf_valid,
@@ -891,9 +884,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
sb_data_e2.common.same_igu_sb_1b,
sb_data_e2.common.state);
} else {
- pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) "
- "vnic_id(0x%x) same_igu_sb_1b(0x%x) "
- "state(0x%x)\n",
+ pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
sb_data_e1x.common.p_func.pf_id,
sb_data_e1x.common.p_func.vf_id,
sb_data_e1x.common.p_func.vf_valid,
@@ -904,21 +895,17 @@ void bnx2x_panic_dump(struct bnx2x *bp)
/* SB_SMs data */
for (j = 0; j < HC_SB_MAX_SM; j++) {
- pr_cont("SM[%d] __flags (0x%x) "
- "igu_sb_id (0x%x) igu_seg_id(0x%x) "
- "time_to_expire (0x%x) "
- "timer_value(0x%x)\n", j,
- hc_sm_p[j].__flags,
- hc_sm_p[j].igu_sb_id,
- hc_sm_p[j].igu_seg_id,
- hc_sm_p[j].time_to_expire,
- hc_sm_p[j].timer_value);
+ pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
+ j, hc_sm_p[j].__flags,
+ hc_sm_p[j].igu_sb_id,
+ hc_sm_p[j].igu_seg_id,
+ hc_sm_p[j].time_to_expire,
+ hc_sm_p[j].timer_value);
}
/* Indecies data */
for (j = 0; j < loop; j++) {
- pr_cont("INDEX[%d] flags (0x%x) "
- "timeout (0x%x)\n", j,
+ pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
hc_index_p[j].flags,
hc_index_p[j].timeout);
}
@@ -972,8 +959,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
struct sw_tx_bd *sw_bd =
&txdata->tx_buf_ring[j];
- BNX2X_ERR("fp%d: txdata %d, "
- "packet[%x]=[%p,%x]\n",
+ BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
i, cos, j, sw_bd->skb,
sw_bd->first_bd);
}
@@ -983,8 +969,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
for (j = start; j != end; j = TX_BD(j + 1)) {
u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
- BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]="
- "[%x:%x:%x:%x]\n",
+ BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
i, cos, j, tx_bd[0], tx_bd[1],
tx_bd[2], tx_bd[3]);
}
@@ -1003,8 +988,8 @@ void bnx2x_panic_dump(struct bnx2x *bp)
* initialization.
*/
#define FLR_WAIT_USEC 10000 /* 10 miliseconds */
-#define FLR_WAIT_INTERAVAL 50 /* usec */
-#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */
+#define FLR_WAIT_INTERVAL 50 /* usec */
+#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
struct pbf_pN_buf_regs {
int pN;
@@ -1037,7 +1022,7 @@ static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
(init_crd - crd_start))) {
if (cur_cnt--) {
- udelay(FLR_WAIT_INTERAVAL);
+ udelay(FLR_WAIT_INTERVAL);
crd = REG_RD(bp, regs->crd);
crd_freed = REG_RD(bp, regs->crd_freed);
} else {
@@ -1051,7 +1036,7 @@ static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
}
}
DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
- poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+ poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
@@ -1069,7 +1054,7 @@ static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
if (cur_cnt--) {
- udelay(FLR_WAIT_INTERAVAL);
+ udelay(FLR_WAIT_INTERVAL);
occup = REG_RD(bp, regs->lines_occup);
freed = REG_RD(bp, regs->lines_freed);
} else {
@@ -1083,7 +1068,7 @@ static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
}
}
DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
- poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+ poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
@@ -1093,7 +1078,7 @@ static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
u32 val;
while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
- udelay(FLR_WAIT_INTERAVAL);
+ udelay(FLR_WAIT_INTERVAL);
return val;
}
@@ -1206,7 +1191,7 @@ static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
int ret = 0;
if (REG_RD(bp, comp_addr)) {
- BNX2X_ERR("Cleanup complete is not 0\n");
+ BNX2X_ERR("Cleanup complete was not 0 before sending\n");
return 1;
}
@@ -1215,11 +1200,13 @@ static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
- DP(BNX2X_MSG_SP, "FW Final cleanup\n");
+ DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
BNX2X_ERR("FW final cleanup did not succeed\n");
+ DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
+ (REG_RD(bp, comp_addr)));
ret = 1;
}
/* Zero completion for nxt FLR */
@@ -1330,6 +1317,7 @@ static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
/* Poll HW usage counters */
+ DP(BNX2X_MSG_SP, "Polling usage counters\n");
if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
return -EBUSY;
@@ -1388,8 +1376,8 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
HC_CONFIG_0_REG_ATTN_BIT_EN_0);
if (!CHIP_IS_E1(bp)) {
- DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
- val, port, addr);
+ DP(NETIF_MSG_IFUP,
+ "write %x to HC %d (addr 0x%x)\n", val, port, addr);
REG_WR(bp, addr, val);
@@ -1400,8 +1388,9 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
if (CHIP_IS_E1(bp))
REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
- DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
- val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+ DP(NETIF_MSG_IFUP,
+ "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
+ (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
REG_WR(bp, addr, val);
/*
@@ -1456,7 +1445,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
IGU_PF_CONF_SINGLE_ISR_EN);
}
- DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
+ DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
@@ -1514,7 +1503,8 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp)
HC_CONFIG_0_REG_INT_LINE_EN_0 |
HC_CONFIG_0_REG_ATTN_BIT_EN_0);
- DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+ DP(NETIF_MSG_IFDOWN,
+ "write %x to HC %d (addr 0x%x)\n",
val, port, addr);
/* flush all outstanding writes */
@@ -1533,7 +1523,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
IGU_PF_CONF_INT_LINE_EN |
IGU_PF_CONF_ATTN_BIT_EN);
- DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
+ DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
/* flush all outstanding writes */
mmiowb();
@@ -1592,11 +1582,12 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
int func = BP_FUNC(bp);
u32 hw_lock_control_reg;
- DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
+ "Trying to take a lock on resource %d\n", resource);
/* Validating that the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
- DP(NETIF_MSG_HW,
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
"resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
resource, HW_LOCK_MAX_RESOURCE_VALUE);
return false;
@@ -1614,7 +1605,8 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
if (lock_status & resource_bit)
return true;
- DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
+ "Failed to get a lock on resource %d\n", resource);
return false;
}
@@ -1675,7 +1667,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
break;
case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
- DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid);
+ DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
break;
@@ -1817,8 +1809,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
/* Validating that the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
- DP(NETIF_MSG_HW,
- "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
resource, HW_LOCK_MAX_RESOURCE_VALUE);
return -EINVAL;
}
@@ -1833,7 +1824,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
/* Validating that the resource is not already taken */
lock_status = REG_RD(bp, hw_lock_control_reg);
if (lock_status & resource_bit) {
- DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
+ BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
lock_status, resource_bit);
return -EEXIST;
}
@@ -1848,7 +1839,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
msleep(5);
}
- DP(NETIF_MSG_HW, "Timeout\n");
+ BNX2X_ERR("Timeout\n");
return -EAGAIN;
}
@@ -1864,12 +1855,9 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
int func = BP_FUNC(bp);
u32 hw_lock_control_reg;
- DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
-
/* Validating that the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
- DP(NETIF_MSG_HW,
- "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
resource, HW_LOCK_MAX_RESOURCE_VALUE);
return -EINVAL;
}
@@ -1884,7 +1872,7 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
/* Validating that the resource is currently taken */
lock_status = REG_RD(bp, hw_lock_control_reg);
if (!(lock_status & resource_bit)) {
- DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
+ BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n",
lock_status, resource_bit);
return -EFAULT;
}
@@ -1945,7 +1933,8 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
switch (mode) {
case MISC_REGISTERS_GPIO_OUTPUT_LOW:
- DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
+ DP(NETIF_MSG_LINK,
+ "Set GPIO %d (shift %d) -> output low\n",
gpio_num, gpio_shift);
/* clear FLOAT and set CLR */
gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
@@ -1953,7 +1942,8 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
break;
case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
- DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
+ DP(NETIF_MSG_LINK,
+ "Set GPIO %d (shift %d) -> output high\n",
gpio_num, gpio_shift);
/* clear FLOAT and set SET */
gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
@@ -1961,7 +1951,8 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
break;
case MISC_REGISTERS_GPIO_INPUT_HI_Z:
- DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
+ DP(NETIF_MSG_LINK,
+ "Set GPIO %d (shift %d) -> input\n",
gpio_num, gpio_shift);
/* set FLOAT */
gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
@@ -2045,16 +2036,18 @@ int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
switch (mode) {
case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
- DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
- "output low\n", gpio_num, gpio_shift);
+ DP(NETIF_MSG_LINK,
+ "Clear GPIO INT %d (shift %d) -> output low\n",
+ gpio_num, gpio_shift);
/* clear SET and set CLR */
gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
break;
case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
- DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
- "output high\n", gpio_num, gpio_shift);
+ DP(NETIF_MSG_LINK,
+ "Set GPIO INT %d (shift %d) -> output high\n",
+ gpio_num, gpio_shift);
/* clear CLR and set SET */
gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
@@ -2087,21 +2080,21 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
switch (mode) {
case MISC_REGISTERS_SPIO_OUTPUT_LOW:
- DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
+ DP(NETIF_MSG_HW, "Set SPIO %d -> output low\n", spio_num);
/* clear FLOAT and set CLR */
spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
break;
case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
- DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
+ DP(NETIF_MSG_HW, "Set SPIO %d -> output high\n", spio_num);
/* clear FLOAT and set SET */
spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
break;
case MISC_REGISTERS_SPIO_INPUT_HI_Z:
- DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
+ DP(NETIF_MSG_HW, "Set SPIO %d -> input\n", spio_num);
/* set FLOAT */
spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
break;
@@ -2543,7 +2536,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
u32 val;
bp->port.pmf = 1;
- DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+ DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
/*
* We need the mb() to ensure the ordering between the writing to
@@ -2688,6 +2681,8 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
if (!fp->disable_tpa) {
__set_bit(BNX2X_Q_FLG_TPA, &flags);
__set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
+ if (fp->mode == TPA_MODE_GRO)
+ __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
}
if (leading) {
@@ -2784,6 +2779,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
rxq_init->sge_buf_sz = sge_sz;
rxq_init->max_sges_pkt = max_sge;
rxq_init->rss_engine_id = BP_FUNC(bp);
+ rxq_init->mcast_engine_id = BP_FUNC(bp);
/* Maximum number or simultaneous TPA aggregation for this Queue.
*
@@ -3121,12 +3117,12 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
* locks
*/
if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
- DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
+ DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
bp->flags |= MF_FUNC_DIS;
bnx2x_e1h_disable(bp);
} else {
- DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
+ DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
bp->flags &= ~MF_FUNC_DIS;
bnx2x_e1h_enable(bp);
@@ -3153,7 +3149,7 @@ static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
if (bp->spq_prod_bd == bp->spq_last_bd) {
bp->spq_prod_bd = bp->spq;
bp->spq_prod_idx = 0;
- DP(NETIF_MSG_TIMER, "end of spq\n");
+ DP(BNX2X_MSG_SP, "end of spq\n");
} else {
bp->spq_prod_bd++;
bp->spq_prod_idx++;
@@ -3222,8 +3218,10 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
#ifdef BNX2X_STOP_ON_ERROR
- if (unlikely(bp->panic))
+ if (unlikely(bp->panic)) {
+ BNX2X_ERR("Can't post SP when there is panic\n");
return -EIO;
+ }
#endif
spin_lock_bh(&bp->spq_lock);
@@ -3270,9 +3268,8 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
atomic_dec(&bp->cq_spq_left);
- DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
- "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) "
- "type(0x%x) left (CQ, EQ) (%x,%x)\n",
+ DP(BNX2X_MSG_SP,
+ "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
(u32)(U64_LO(bp->spq_mapping) +
(void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
@@ -3464,9 +3461,8 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
ext_phy_config);
/* log the failure */
- netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
- " the driver to shutdown the card to prevent permanent"
- " damage. Please contact OEM Support for assistance\n");
+ netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
+ "Please contact OEM Support for assistance\n");
/*
* Scheudle device reset (unload)
@@ -3709,11 +3705,11 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
*/
void bnx2x_set_reset_global(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
-
+ u32 val;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
- barrier();
- mmiowb();
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/*
@@ -3723,11 +3719,11 @@ void bnx2x_set_reset_global(struct bnx2x *bp)
*/
static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
-
+ u32 val;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
- barrier();
- mmiowb();
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/*
@@ -3750,15 +3746,17 @@ static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
*/
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 val;
u32 bit = BP_PATH(bp) ?
BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
/* Clear the bit */
val &= ~bit;
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
- barrier();
- mmiowb();
+
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/*
@@ -3768,15 +3766,16 @@ static inline void bnx2x_set_reset_done(struct bnx2x *bp)
*/
void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 val;
u32 bit = BP_PATH(bp) ?
BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
/* Set the bit */
val |= bit;
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
- barrier();
- mmiowb();
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/*
@@ -3794,25 +3793,28 @@ bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
}
/*
- * Increment the load counter for the current engine.
+ * set pf load for the current pf.
*
* should be run under rtnl lock
*/
-void bnx2x_inc_load_cnt(struct bnx2x *bp)
+void bnx2x_set_pf_load(struct bnx2x *bp)
{
- u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 val1, val;
u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
BNX2X_PATH0_LOAD_CNT_MASK;
u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
BNX2X_PATH0_LOAD_CNT_SHIFT;
- DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
/* get the current counter value */
val1 = (val & mask) >> shift;
- /* increment... */
- val1++;
+ /* set bit of that PF */
+ val1 |= (1 << bp->pf_num);
/* clear the old value */
val &= ~mask;
@@ -3821,34 +3823,35 @@ void bnx2x_inc_load_cnt(struct bnx2x *bp)
val |= ((val1 << shift) & mask);
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
- barrier();
- mmiowb();
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/**
- * bnx2x_dec_load_cnt - decrement the load counter
+ * bnx2x_clear_pf_load - clear pf load mark
*
* @bp: driver handle
*
* Should be run under rtnl lock.
* Decrements the load counter for the current engine. Returns
- * the new counter value.
+ * whether other functions are still loaded
*/
-u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+bool bnx2x_clear_pf_load(struct bnx2x *bp)
{
- u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 val1, val;
u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
BNX2X_PATH0_LOAD_CNT_MASK;
u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
BNX2X_PATH0_LOAD_CNT_SHIFT;
- DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
/* get the current counter value */
val1 = (val & mask) >> shift;
- /* decrement... */
- val1--;
+ /* clear bit of that PF */
+ val1 &= ~(1 << bp->pf_num);
/* clear the old value */
val &= ~mask;
@@ -3857,18 +3860,16 @@ u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
val |= ((val1 << shift) & mask);
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
- barrier();
- mmiowb();
-
- return val1;
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ return val1 != 0;
}
/*
- * Read the load counter for the current engine.
+ * Read the load status for the current engine.
*
* should be run under rtnl lock
*/
-static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
+static inline bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
{
u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
BNX2X_PATH0_LOAD_CNT_MASK);
@@ -3876,27 +3877,28 @@ static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
BNX2X_PATH0_LOAD_CNT_SHIFT);
u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
- DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val);
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
val = (val & mask) >> shift;
- DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val);
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
+ engine, val);
- return val;
+ return val != 0;
}
/*
- * Reset the load counter for the current engine.
- *
- * should be run under rtnl lock
+ * Reset the load status for the current engine.
*/
-static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+static inline void bnx2x_clear_load_status(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 val;
u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
- BNX2X_PATH0_LOAD_CNT_MASK);
-
+ BNX2X_PATH0_LOAD_CNT_MASK);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}
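Editor's note: the bnx2x_set_pf_load()/bnx2x_clear_pf_load()/bnx2x_get_load_status() rework above replaces the per-path load counter with a per-PF bitmask kept in the same masked field of the recovery register, and brackets every read-modify-write with the RECOVERY_REG hardware lock instead of barrier()/mmiowb(). A self-contained sketch of the bookkeeping (names invented; the hardware lock and register I/O are omitted):

#include <stdbool.h>
#include <stdint.h>

static uint32_t recovery_reg;	/* stands in for BNX2X_RECOVERY_GLOB_REG */

static void set_pf_load(uint32_t mask, int shift, int pf_num)
{
	uint32_t field = (recovery_reg & mask) >> shift;

	field |= 1u << pf_num;		/* mark this PF as loaded */
	recovery_reg = (recovery_reg & ~mask) | ((field << shift) & mask);
}

/* Returns true while at least one other PF on this path is still loaded. */
static bool clear_pf_load(uint32_t mask, int shift, int pf_num)
{
	uint32_t field = (recovery_reg & mask) >> shift;

	field &= ~(1u << pf_num);	/* drop this PF's bit */
	recovery_reg = (recovery_reg & ~mask) | ((field << shift) & mask);
	return field != 0;
}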
static inline void _print_next_block(int idx, const char *blk)
@@ -4168,9 +4170,8 @@ static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
(sig[3] & HW_PRTY_ASSERT_SET_3) ||
(sig[4] & HW_PRTY_ASSERT_SET_4)) {
int par_num = 0;
- DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
- "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x "
- "[4]:0x%08x\n",
+ DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
+ "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
sig[0] & HW_PRTY_ASSERT_SET_0,
sig[1] & HW_PRTY_ASSERT_SET_1,
sig[2] & HW_PRTY_ASSERT_SET_2,
@@ -4240,34 +4241,25 @@ static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "ADDRESS_ERROR\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "INCORRECT_RCV_BEHAVIOR\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "WAS_ERROR_ATTN\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "VF_LENGTH_VIOLATION_ATTN\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
if (val &
PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "VF_GRC_SPACE_VIOLATION_ATTN\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
if (val &
PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "VF_MSIX_BAR_VIOLATION_ATTN\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "TCPL_ERROR_ATTN\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "TCPL_IN_TWO_RCBS_ATTN\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "CSSNOOP_FIFO_OVERFLOW\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
}
if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
@@ -4275,19 +4267,15 @@ static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
- BNX2X_ERR("ATC_ATC_INT_STS_REG"
- "_ATC_TCPL_TO_NOT_PEND\n");
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
- BNX2X_ERR("ATC_ATC_INT_STS_REG_"
- "ATC_GPA_MULTIPLE_HITS\n");
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
- BNX2X_ERR("ATC_ATC_INT_STS_REG_"
- "ATC_RCPL_TO_EMPTY_CNT\n");
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
- BNX2X_ERR("ATC_ATC_INT_STS_REG_"
- "ATC_IREQ_LESS_THAN_STU\n");
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
}
if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
@@ -4346,8 +4334,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
if (deasserted & (1 << index)) {
group_mask = &bp->attn_group[index];
- DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
- "%08x %08x %08x\n",
+ DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
index,
group_mask->sig[0], group_mask->sig[1],
group_mask->sig[2], group_mask->sig[3],
@@ -4507,6 +4494,7 @@ static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
case BNX2X_FILTER_MAC_PENDING:
+ DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
#ifdef BCM_CNIC
if (cid == BNX2X_ISCSI_ETH_CID)
vlan_mac_obj = &bp->iscsi_l2_mac_obj;
@@ -4516,6 +4504,7 @@ static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
break;
case BNX2X_FILTER_MCAST_PENDING:
+ DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
/* This is only relevant for 57710 where multicast MACs are
* configured as unicast MACs using the same ramrod.
*/
@@ -4617,7 +4606,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
/* handle eq element */
switch (opcode) {
case EVENT_RING_OPCODE_STAT_QUERY:
- DP(NETIF_MSG_TIMER, "got statistics comp event %d\n",
+ DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
+ "got statistics comp event %d\n",
bp->stats_comp++);
/* nothing to do with stats comp */
goto next_spqe;
@@ -4644,7 +4634,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
goto next_spqe;
case EVENT_RING_OPCODE_STOP_TRAFFIC:
- DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n");
+ DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
if (f_obj->complete_cmd(bp, f_obj,
BNX2X_F_CMD_TX_STOP))
break;
@@ -4652,21 +4642,23 @@ static void bnx2x_eq_int(struct bnx2x *bp)
goto next_spqe;
case EVENT_RING_OPCODE_START_TRAFFIC:
- DP(BNX2X_MSG_SP, "got START TRAFFIC\n");
+ DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
if (f_obj->complete_cmd(bp, f_obj,
BNX2X_F_CMD_TX_START))
break;
bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
goto next_spqe;
case EVENT_RING_OPCODE_FUNCTION_START:
- DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n");
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "got FUNC_START ramrod\n");
if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
break;
goto next_spqe;
case EVENT_RING_OPCODE_FUNCTION_STOP:
- DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n");
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "got FUNC_STOP ramrod\n");
if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
break;
@@ -4748,7 +4740,7 @@ static void bnx2x_sp_task(struct work_struct *work)
/* if (status == 0) */
/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
- DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
+ DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status);
/* HW attentions */
if (status & BNX2X_DEF_SB_ATT_IDX) {
@@ -4782,7 +4774,7 @@ static void bnx2x_sp_task(struct work_struct *work)
}
if (unlikely(status))
- DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+ DP(BNX2X_MSG_SP, "got an unknown interrupt! (status 0x%x)\n",
status);
bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
@@ -5060,7 +5052,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
igu_sb_id, igu_seg_id);
- DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
+ DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
/* write indecies to HW */
bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
@@ -5410,6 +5402,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
/* init shortcut */
fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
+
/* Setup SB indicies */
fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
@@ -5437,8 +5430,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
*/
bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
- DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
- "cl_id %d fw_sb %d igu_sb %d\n",
+ DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
fp->igu_sb_id);
bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
@@ -5525,8 +5517,7 @@ gunzip_nomem2:
bp->gunzip_buf = NULL;
gunzip_nomem1:
- netdev_err(bp->dev, "Cannot allocate firmware buffer for"
- " un-compression\n");
+ BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
return -ENOMEM;
}
@@ -5578,8 +5569,8 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
if (bp->gunzip_outlen & 0x3)
- netdev_err(bp->dev, "Firmware decompression error:"
- " gunzip_outlen (%d) not aligned\n",
+ netdev_err(bp->dev,
+ "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
bp->gunzip_outlen);
bp->gunzip_outlen >>= 2;
@@ -5998,7 +5989,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
{
u32 val;
- DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
+ DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
/*
* take the UNDI lock to protect undi_unload flow from accessing
@@ -6322,9 +6313,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
if (sizeof(union cdu_context) != 1024)
/* we currently assume that a context is 1024 bytes */
- dev_alert(&bp->pdev->dev, "please adjust the size "
- "of cdu_context(%ld)\n",
- (long)sizeof(union cdu_context));
+ dev_alert(&bp->pdev->dev,
+ "please adjust the size of cdu_context(%ld)\n",
+ (long)sizeof(union cdu_context));
bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
val = (4 << 24) + (0 << 12) + 1024;
@@ -6453,7 +6444,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
bnx2x__link_reset(bp);
- DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
+ DP(NETIF_MSG_HW, "starting port init port %d\n", port);
REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
@@ -6674,13 +6665,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
u16 cdu_ilt_start;
u32 addr, val;
u32 main_mem_base, main_mem_size, main_mem_prty_clr;
- int i, main_mem_width;
+ int i, main_mem_width, rc;
- DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
+ DP(NETIF_MSG_HW, "starting func init func %d\n", func);
/* FLR cleanup - hmmm */
- if (!CHIP_IS_E1x(bp))
- bnx2x_pf_flr_clnup(bp);
+ if (!CHIP_IS_E1x(bp)) {
+ rc = bnx2x_pf_flr_clnup(bp);
+ if (rc)
+ return rc;
+ }
/* set MSI reconfigure capability */
if (bp->common.int_block == INT_BLOCK_HC) {
@@ -6933,9 +6927,9 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
val = REG_RD(bp, main_mem_prty_clr);
if (val)
- DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
- "block during "
- "function init (0x%x)!\n", val);
+ DP(NETIF_MSG_HW,
+ "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
+ val);
/* Clear "false" parity errors in MSI-X table */
for (i = main_mem_base;
@@ -7063,6 +7057,7 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
alloc_mem_err:
BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ BNX2X_ERR("Can't allocate memory\n");
return -ENOMEM;
}
@@ -7089,6 +7084,11 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
+#ifdef BCM_CNIC
+ /* write address to which L5 should insert its values */
+ bp->cnic_eth_dev.addr_drv_info_to_mcp = &bp->slowpath->drv_info_to_mcp;
+#endif
+
/* Allocated memory for FW statistics */
if (bnx2x_alloc_fw_stats_mem(bp))
goto alloc_mem_err;
@@ -7121,6 +7121,7 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
alloc_mem_err:
bnx2x_free_mem(bp);
+ BNX2X_ERR("Can't allocate memory\n");
return -ENOMEM;
}
@@ -7186,8 +7187,9 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
unsigned long ramrod_flags = 0;
#ifdef BCM_CNIC
- if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_ISCSI_SD(bp)) {
- DP(NETIF_MSG_IFUP, "Ignoring Zero MAC for iSCSI SD mode\n");
+ if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_STORAGE_SD(bp)) {
+ DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
+ "Ignoring Zero MAC for STORAGE SD mode\n");
return 0;
}
#endif
@@ -7220,14 +7222,13 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
/* falling through... */
case INT_MODE_INTx:
bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
- DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
+ BNX2X_DEV_INFO("set number of queues to 1\n");
break;
default:
/* Set number of queues according to bp->multi_mode value */
bnx2x_set_num_queues(bp);
- DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
- bp->num_queues);
+ BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
/* if we can't use MSI-X we only need one fp,
* so try to enable MSI-X with the requested number of fp's
@@ -7235,13 +7236,9 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
*/
if (bnx2x_enable_msix(bp)) {
/* failed to enable MSI-X */
- if (bp->multi_mode)
- DP(NETIF_MSG_IFUP,
- "Multi requested but failed to "
- "enable MSI-X (%d), "
- "set number of queues to %d\n",
- bp->num_queues,
- 1 + NON_ETH_CONTEXT_USE);
+ BNX2X_DEV_INFO("Failed to enable MSI-X (%d), set number of queues to %d\n",
+ bp->num_queues, 1 + NON_ETH_CONTEXT_USE);
+
bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
/* Try to enable MSI */
@@ -7279,8 +7276,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
#endif
ilt_client->end = line - 1;
- DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
- "flags 0x%x, hw psz %d\n",
+ DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
ilt_client->start,
ilt_client->end,
ilt_client->page_size,
@@ -7301,8 +7297,8 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
ilt_client->end = line - 1;
- DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
- "flags 0x%x, hw psz %d\n",
+ DP(NETIF_MSG_IFUP,
+ "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
ilt_client->start,
ilt_client->end,
ilt_client->page_size,
@@ -7320,8 +7316,8 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
line += SRC_ILT_LINES;
ilt_client->end = line - 1;
- DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
- "flags 0x%x, hw psz %d\n",
+ DP(NETIF_MSG_IFUP,
+ "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
ilt_client->start,
ilt_client->end,
ilt_client->page_size,
@@ -7342,8 +7338,8 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
line += TM_ILT_LINES;
ilt_client->end = line - 1;
- DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
- "flags 0x%x, hw psz %d\n",
+ DP(NETIF_MSG_IFUP,
+ "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
ilt_client->start,
ilt_client->end,
ilt_client->page_size,
@@ -7404,7 +7400,7 @@ static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
/* set maximum number of COSs supported by this queue */
init_params->max_cos = fp->max_cos;
- DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d\n",
+ DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
fp->index, init_params->max_cos);
/* set the context pointers queue object */
@@ -7435,9 +7431,8 @@ int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* Set Tx TX_ONLY_SETUP parameters */
bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
- DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:"
- "cos %d, primary cid %d, cid %d, "
- "client id %d, sp-client id %d, flags %lx\n",
+ DP(NETIF_MSG_IFUP,
+ "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
tx_only_params->gen_params.spcl_id, tx_only_params->flags);
@@ -7461,7 +7456,7 @@ int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bool leading)
{
- struct bnx2x_queue_state_params q_params = {0};
+ struct bnx2x_queue_state_params q_params = {NULL};
struct bnx2x_queue_setup_params *setup_params =
&q_params.params.setup;
struct bnx2x_queue_setup_tx_only_params *tx_only_params =
@@ -7469,7 +7464,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
int rc;
u8 tx_index;
- DP(BNX2X_MSG_SP, "setting up queue %d\n", fp->index);
+ DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
/* reset IGU state skip FCoE L2 queue */
if (!IS_FCOE_FP(fp))
@@ -7493,7 +7488,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return rc;
}
- DP(BNX2X_MSG_SP, "init complete\n");
+ DP(NETIF_MSG_IFUP, "init complete\n");
/* Now move the Queue to the SETUP state... */
@@ -7544,10 +7539,10 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
{
struct bnx2x_fastpath *fp = &bp->fp[index];
struct bnx2x_fp_txdata *txdata;
- struct bnx2x_queue_state_params q_params = {0};
+ struct bnx2x_queue_state_params q_params = {NULL};
int rc, tx_index;
- DP(BNX2X_MSG_SP, "stopping queue %d cid %d\n", index, fp->cid);
+ DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
q_params.q_obj = &fp->q_obj;
/* We want to wait for completion in this context */
@@ -7562,7 +7557,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
/* ascertain this is a normal queue*/
txdata = &fp->txdata[tx_index];
- DP(BNX2X_MSG_SP, "stopping tx-only queue %d\n",
+ DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
txdata->txq_index);
/* send halt terminate on tx-only connection */
@@ -7720,7 +7715,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
/* Prepare parameters for function state transitions */
__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
@@ -7735,7 +7730,7 @@ static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
static inline int bnx2x_func_stop(struct bnx2x *bp)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
int rc;
/* Prepare parameters for function state transitions */
@@ -7754,8 +7749,7 @@ static inline int bnx2x_func_stop(struct bnx2x *bp)
#ifdef BNX2X_STOP_ON_ERROR
return rc;
#else
- BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry "
- "transaction\n");
+ BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
__set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
return bnx2x_func_state_change(bp, &func_params);
#endif
@@ -7818,14 +7812,12 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
else {
int path = BP_PATH(bp);
- DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
- "%d, %d, %d\n",
+ DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
path, load_count[path][0], load_count[path][1],
load_count[path][2]);
load_count[path][0]--;
load_count[path][1 + port]--;
- DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
- "%d, %d, %d\n",
+ DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
path, load_count[path][0], load_count[path][1],
load_count[path][2]);
if (load_count[path][0] == 0)
@@ -7888,16 +7880,17 @@ static inline int bnx2x_func_wait_started(struct bnx2x *bp)
if (bnx2x_func_get_state(bp, &bp->func_obj) !=
BNX2X_F_STATE_STARTED) {
#ifdef BNX2X_STOP_ON_ERROR
+ BNX2X_ERR("Wrong function state\n");
return -EBUSY;
#else
/*
* Failed to complete the transaction in a "good way"
* Force both transactions with CLR bit
*/
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
- DP(BNX2X_MSG_SP, "Hmmm... unexpected function state! "
- "Forcing STARTED-->TX_ST0PPED-->STARTED\n");
+ DP(NETIF_MSG_IFDOWN,
+ "Hmmm... unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n");
func_params.f_obj = &bp->func_obj;
__set_bit(RAMROD_DRV_CLR_ONLY,
@@ -7921,7 +7914,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
int port = BP_PORT(bp);
int i, rc = 0;
u8 cos;
- struct bnx2x_mcast_ramrod_params rparam = {0};
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
u32 reset_code;
/* Wait until tx fastpath tasks complete */
@@ -7948,8 +7941,8 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
true);
if (rc < 0)
- BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: "
- "%d\n", rc);
+ BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
+ rc);
/* Disable LLH */
if (!CHIP_IS_E1(bp))
@@ -8042,7 +8035,7 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
u32 val;
- DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
+ DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
if (CHIP_IS_E1(bp)) {
int port = BP_PORT(bp);
@@ -8095,7 +8088,7 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
(val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
}
- DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
close ? "closing" : "opening");
mmiowb();
}
@@ -8137,7 +8130,7 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
u32 shmem;
u32 validity_offset;
- DP(NETIF_MSG_HW, "Starting\n");
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
/* Set `magic' bit in order to save MF config */
if (!CHIP_IS_E1(bp))
@@ -8374,12 +8367,8 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
} while (cnt-- > 0);
if (cnt <= 0) {
- DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
- " are still"
- " outstanding read requests after 1s!\n");
- DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
- " port_is_idle_0=0x%08x,"
- " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
+ BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
+ BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
pgl_exp_rom2);
return -EAGAIN;
@@ -8445,13 +8434,38 @@ int bnx2x_leader_reset(struct bnx2x *bp)
{
int rc = 0;
bool global = bnx2x_reset_is_global(bp);
+ u32 load_code;
+
+ /* if not going to reset MCP - load "fake" driver to reset HW while
+ * driver is owner of the HW
+ */
+ if (!global && !BP_NOMCP(bp)) {
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
+ if (!load_code) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EAGAIN;
+ goto exit_leader_reset;
+ }
+ if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
+ (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
+ BNX2X_ERR("MCP unexpected resp, aborting\n");
+ rc = -EAGAIN;
+ goto exit_leader_reset2;
+ }
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ if (!load_code) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EAGAIN;
+ goto exit_leader_reset2;
+ }
+ }
/* Try to recover after the failure */
if (bnx2x_process_kill(bp, global)) {
- netdev_err(bp->dev, "Something bad had happen on engine %d! "
- "Aii!\n", BP_PATH(bp));
+ BNX2X_ERR("Something bad had happen on engine %d! Aii!\n",
+ BP_PATH(bp));
rc = -EAGAIN;
- goto exit_leader_reset;
+ goto exit_leader_reset2;
}
/*
@@ -8462,6 +8476,12 @@ int bnx2x_leader_reset(struct bnx2x *bp)
if (global)
bnx2x_clear_reset_global(bp);
+exit_leader_reset2:
+ /* unload "fake driver" if it was loaded */
+ if (!global && !BP_NOMCP(bp)) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ }
exit_leader_reset:
bp->is_leader = 0;
bnx2x_release_leader_lock(bp);
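The block added above brackets the hardware recovery with a LOAD_REQ/LOAD_DONE handshake to the management firmware and tears the "fake" load down on every exit path. A rough userspace sketch of that pairing pattern; demo_fw_command() and its return convention are stand-ins, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

/* Stub standing in for bnx2x_fw_command(): a nonzero return models a
 * valid MCP response, 0 models a failure. */
static unsigned int demo_fw_command(const char *cmd)
{
	printf("fw command: %s\n", cmd);
	return 1;
}

/* Sketch of the pairing the patch adds: register a "fake" driver load
 * with the management firmware before resetting the hardware, and undo
 * it on every path where it was registered. */
static int demo_leader_reset(bool global, bool no_mcp)
{
	int rc = 0;
	bool fake_loaded = false;

	if (!global && !no_mcp) {
		if (!demo_fw_command("LOAD_REQ") ||
		    !demo_fw_command("LOAD_DONE")) {
			rc = -1;	/* MCP did not answer */
			goto out;
		}
		fake_loaded = true;
	}

	/* ... the process-kill style recovery would run here ... */

out:
	if (fake_loaded) {
		demo_fw_command("UNLOAD_REQ_WOL_MCP");
		demo_fw_command("UNLOAD_DONE");
	}
	return rc;
}

int main(void)
{
	return demo_leader_reset(false, false) ? 1 : 0;
}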
@@ -8498,13 +8518,16 @@ static inline void bnx2x_recovery_failed(struct bnx2x *bp)
static void bnx2x_parity_recover(struct bnx2x *bp)
{
bool global = false;
+ u32 error_recovered, error_unrecovered;
+ bool is_parity;
DP(NETIF_MSG_HW, "Handling parity\n");
while (1) {
switch (bp->recovery_state) {
case BNX2X_RECOVERY_INIT:
DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
- bnx2x_chk_parity_attn(bp, &global, false);
+ is_parity = bnx2x_chk_parity_attn(bp, &global, false);
+ WARN_ON(!is_parity);
/* Try to get a LEADER_LOCK HW lock */
if (bnx2x_trylock_leader_lock(bp)) {
@@ -8528,15 +8551,6 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
bp->recovery_state = BNX2X_RECOVERY_WAIT;
- /*
- * Reset MCP command sequence number and MCP mail box
- * sequence as we are going to reset the MCP.
- */
- if (global) {
- bp->fw_seq = 0;
- bp->fw_drv_pulse_wr_seq = 0;
- }
-
/* Ensure "is_leader", MCP command sequence and
* "recovery_state" update values are seen on other
* CPUs.
@@ -8548,10 +8562,10 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
if (bp->is_leader) {
int other_engine = BP_PATH(bp) ? 0 : 1;
- u32 other_load_counter =
- bnx2x_get_load_cnt(bp, other_engine);
- u32 load_counter =
- bnx2x_get_load_cnt(bp, BP_PATH(bp));
+ bool other_load_status =
+ bnx2x_get_load_status(bp, other_engine);
+ bool load_status =
+ bnx2x_get_load_status(bp, BP_PATH(bp));
global = bnx2x_reset_is_global(bp);
/*
@@ -8562,8 +8576,8 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
* the gates will remain closed for that
* engine.
*/
- if (load_counter ||
- (global && other_load_counter)) {
+ if (load_status ||
+ (global && other_load_status)) {
/* Wait until all other functions get
* down.
*/
@@ -8620,13 +8634,32 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
return;
}
- if (bnx2x_nic_load(bp, LOAD_NORMAL))
- bnx2x_recovery_failed(bp);
- else {
+ error_recovered =
+ bp->eth_stats.recoverable_error;
+ error_unrecovered =
+ bp->eth_stats.unrecoverable_error;
+ bp->recovery_state =
+ BNX2X_RECOVERY_NIC_LOADING;
+ if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
+ error_unrecovered++;
+ netdev_err(bp->dev,
+ "Recovery failed. Power cycle needed\n");
+ /* Disconnect this device */
+ netif_device_detach(bp->dev);
+ /* Shut down the power */
+ bnx2x_set_power_state(
+ bp, PCI_D3hot);
+ smp_mb();
+ } else {
bp->recovery_state =
BNX2X_RECOVERY_DONE;
+ error_recovered++;
smp_mb();
}
+ bp->eth_stats.recoverable_error =
+ error_recovered;
+ bp->eth_stats.unrecoverable_error =
+ error_unrecovered;
return;
}
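The reload path above snapshots the recoverable/unrecoverable counters, bumps exactly one of them depending on whether bnx2x_nic_load() succeeded, and writes both back; on failure the device is detached and powered down. A small sketch of that accounting pattern with hypothetical types and a stubbed load step:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stats block standing in for bp->eth_stats. */
struct demo_stats {
	unsigned int recoverable_error;
	unsigned int unrecoverable_error;
};

/* Stub for the reload attempt: 0 on success, nonzero on failure. */
static int demo_nic_load(bool will_fail)
{
	return will_fail ? -1 : 0;
}

/* Snapshot both counters, bump exactly one of them depending on how the
 * reload went, then write them back. */
static void demo_finish_recovery(struct demo_stats *st, bool will_fail)
{
	unsigned int recovered = st->recoverable_error;
	unsigned int unrecovered = st->unrecoverable_error;

	if (demo_nic_load(will_fail)) {
		unrecovered++;
		printf("recovery failed, power cycle needed\n");
	} else {
		recovered++;
		printf("recovery done\n");
	}

	st->recoverable_error = recovered;
	st->unrecoverable_error = unrecovered;
}

int main(void)
{
	struct demo_stats st = { 0, 0 };

	demo_finish_recovery(&st, false);
	demo_finish_recovery(&st, true);
	printf("recovered=%u unrecovered=%u\n",
	       st.recoverable_error, st.unrecoverable_error);
	return 0;
}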
@@ -8637,6 +8670,8 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
}
}
+static int bnx2x_close(struct net_device *dev);
+
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
* scheduled on a general queue in order to prevent a dead lock.
*/
@@ -8651,8 +8686,7 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
/* if stop on error is defined no recovery flows should be executed */
#ifdef BNX2X_STOP_ON_ERROR
- BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined "
- "so reset not done to allow debug dump,\n"
+ BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
"you will need to reboot when done\n");
goto sp_rtnl_not_reset;
#endif
@@ -8695,7 +8729,7 @@ sp_rtnl_not_reset:
* damage
*/
if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
- DP(BNX2X_MSG_SP, "fan failure detected. Unloading driver\n");
+ DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
netif_device_detach(bp->dev);
bnx2x_close(bp->dev);
}
@@ -8782,11 +8816,13 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
u32 val;
- /* Check if there is any driver already loaded */
- val = REG_RD(bp, MISC_REG_UNPREPARED);
- if (val == 0x1) {
+ /* possibly another driver is trying to reset the chip */
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+ /* check if doorbell queue is reset */
+ if (REG_RD(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET)
+ & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
- bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
/*
* Check if it is the UNDI driver
* UNDI driver initializes CID offset for normal bell to 0x7
@@ -8874,14 +8910,11 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
/* restore our func and fw_seq */
bp->pf_num = orig_pf_num;
- bp->fw_seq =
- (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
}
-
- /* now it's safe to release the lock */
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
}
+
+ /* now it's safe to release the lock */
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
}
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
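The rework above takes the RESET hardware lock before probing, decides whether UNDI cleanup is needed by testing the doorbell-queue reset bit, and releases the lock on the single common exit. A hedged sketch of that flow; the bit value and helper names here are illustrative stand-ins:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit position only; the real RST_DORQ bit is defined in
 * bnx2x_reg.h as (0x1<<19). */
#define DEMO_RST_DORQ	(UINT32_C(1) << 19)

static void demo_lock(void)   { puts("hw lock taken");    }
static void demo_unlock(void) { puts("hw lock released"); }

/* Take the lock first, test whether the doorbell queue was taken out of
 * reset by a previous driver (e.g. UNDI), and release the lock whether
 * or not the cleanup branch ran. */
static void demo_undi_unload(uint32_t reset_reg_1)
{
	demo_lock();

	if (reset_reg_1 & DEMO_RST_DORQ)
		puts("doorbell queue out of reset, run UNDI cleanup");
	else
		puts("nothing to clean up");

	demo_unlock();
}

int main(void)
{
	demo_undi_unload(DEMO_RST_DORQ);	/* cleanup path */
	demo_undi_unload(0);			/* fast path    */
	return 0;
}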
@@ -8924,6 +8957,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->pfid = bp->pf_num; /* 0..7 */
}
+ BNX2X_DEV_INFO("pf_id: %x", bp->pfid);
+
bp->link_params.chip_id = bp->common.chip_id;
BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
@@ -8981,8 +9016,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
if (val < BNX2X_BC_VER) {
/* for now only warn
* later we might need to enforce this */
- BNX2X_ERR("This driver needs bc_ver %X but found %X, "
- "please upgrade BC\n", BNX2X_BC_VER, val);
+ BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
+ BNX2X_BC_VER, val);
}
bp->link_params.feature_config_flags |=
(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
@@ -9123,8 +9158,7 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
}
if (!(bp->port.supported[0] || bp->port.supported[1])) {
- BNX2X_ERR("NVRAM config error. BAD phy config."
- "PHY1 config 0x%x, PHY2 config 0x%x\n",
+ BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
SHMEM_RD(bp,
dev_info.port_hw_config[port].external_phy_config),
SHMEM_RD(bp,
@@ -9212,6 +9246,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
SPEED_AUTO_NEG;
bp->port.advertising[idx] |=
bp->port.supported[idx];
+ if (bp->link_params.phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ bp->port.advertising[idx] |=
+ (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full);
} else {
/* force 10G, no AN */
bp->link_params.req_line_speed[idx] =
@@ -9231,9 +9270,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_10baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
link_config,
bp->link_params.speed_cap_mask[idx]);
return;
@@ -9250,9 +9287,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_10baseT_Half |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
link_config,
bp->link_params.speed_cap_mask[idx]);
return;
@@ -9268,9 +9303,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_100baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
link_config,
bp->link_params.speed_cap_mask[idx]);
return;
@@ -9288,9 +9321,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_100baseT_Half |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
link_config,
bp->link_params.speed_cap_mask[idx]);
return;
@@ -9306,9 +9337,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_1000baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
link_config,
bp->link_params.speed_cap_mask[idx]);
return;
@@ -9324,9 +9353,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_2500baseX_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
link_config,
bp->link_params.speed_cap_mask[idx]);
return;
@@ -9342,9 +9369,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_10000baseT_Full |
ADVERTISED_FIBRE);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
link_config,
bp->link_params.speed_cap_mask[idx]);
return;
@@ -9355,8 +9380,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
break;
default:
- BNX2X_ERR("NVRAM config error. "
- "BAD link speed link_config 0x%x\n",
+ BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
link_config);
bp->link_params.req_line_speed[idx] =
SPEED_AUTO_NEG;
@@ -9374,8 +9398,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
BNX2X_FLOW_CTRL_NONE;
}
- BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
- " 0x%x advertising 0x%x\n",
+ BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
bp->link_params.req_line_speed[idx],
bp->link_params.req_duplex[idx],
bp->link_params.req_flow_ctrl[idx],
@@ -9424,8 +9447,7 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
(config & PORT_FEATURE_WOL_ENABLED));
- BNX2X_DEV_INFO("lane_config 0x%08x "
- "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
+ BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
bp->link_params.lane_config,
bp->link_params.speed_cap_mask[0],
bp->port.link_config[0]);
@@ -9467,6 +9489,7 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
void bnx2x_get_iscsi_info(struct bnx2x *bp)
{
+ u32 no_flags = NO_ISCSI_FLAG;
#ifdef BCM_CNIC
int port = BP_PORT(bp);
@@ -9486,12 +9509,28 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp)
* disable the feature.
*/
if (!bp->cnic_eth_dev.max_iscsi_conn)
- bp->flags |= NO_ISCSI_FLAG;
+ bp->flags |= no_flags;
#else
- bp->flags |= NO_ISCSI_FLAG;
+ bp->flags |= no_flags;
#endif
}
+#ifdef BCM_CNIC
+static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
+{
+ /* Port info */
+ bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+ MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+ MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
+
+ /* Node info */
+ bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+ MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+ MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
+}
+#endif
static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
{
#ifdef BCM_CNIC
@@ -9534,24 +9573,11 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
* Read the WWN info only if the FCoE feature is enabled for
* this function.
*/
- if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
- /* Port info */
- bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
- MF_CFG_RD(bp, func_ext_config[func].
- fcoe_wwn_port_name_upper);
- bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
- MF_CFG_RD(bp, func_ext_config[func].
- fcoe_wwn_port_name_lower);
-
- /* Node info */
- bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
- MF_CFG_RD(bp, func_ext_config[func].
- fcoe_wwn_node_name_upper);
- bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
- MF_CFG_RD(bp, func_ext_config[func].
- fcoe_wwn_node_name_lower);
- }
- }
+ if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
+ bnx2x_get_ext_wwn_info(bp, func);
+
+ } else if (IS_MF_FCOE_SD(bp))
+ bnx2x_get_ext_wwn_info(bp, func);
BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
@@ -9592,7 +9618,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
if (BP_NOMCP(bp)) {
BNX2X_ERROR("warning: random MAC workaround active\n");
- random_ether_addr(bp->dev->dev_addr);
+ eth_hw_addr_random(bp->dev);
} else if (IS_MF(bp)) {
val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
@@ -9604,8 +9630,11 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
/*
* iSCSI and FCoE NPAR MACs: if there is neither an iSCSI nor an
* FCoE MAC then the appropriate feature should be disabled.
+ *
+ * In non SD mode features configuration comes from
+ * struct func_ext_config.
*/
- if (IS_MF_SI(bp)) {
+ if (!IS_MF_SD(bp)) {
u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
val2 = MF_CFG_RD(bp, func_ext_config[func].
@@ -9629,16 +9658,25 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
} else
bp->flags |= NO_FCOE_FLAG;
- } else { /* SD mode */
- if (BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) {
- /* use primary mac as iscsi mac */
- memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+ } else { /* SD MODE */
+ if (IS_MF_STORAGE_SD(bp)) {
+ if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
+ /* use primary mac as iscsi mac */
+ memcpy(iscsi_mac, bp->dev->dev_addr,
+ ETH_ALEN);
+
+ BNX2X_DEV_INFO("SD ISCSI MODE\n");
+ BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
+ iscsi_mac);
+ } else { /* FCoE */
+ memcpy(fip_mac, bp->dev->dev_addr,
+ ETH_ALEN);
+ BNX2X_DEV_INFO("SD FCoE MODE\n");
+ BNX2X_DEV_INFO("Read FIP MAC: %pM\n",
+ fip_mac);
+ }
/* Zero primary MAC configuration */
memset(bp->dev->dev_addr, 0, ETH_ALEN);
-
- BNX2X_DEV_INFO("SD ISCSI MODE\n");
- BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
- iscsi_mac);
}
}
#endif
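In the switch-dependent storage branch above, the storage personality (iSCSI or FCoE) takes over the primary MAC and the primary MAC configuration is then zeroed. A compact sketch of that selection, using hypothetical buffers rather than the driver's fields:

#include <stdio.h>
#include <string.h>

#define DEMO_ETH_ALEN 6

/* The storage personality inherits the primary MAC, and the primary MAC
 * is then cleared so the L2 function does not also claim it. */
static void demo_pick_storage_mac(unsigned char *dev_addr,
				  unsigned char *iscsi_mac,
				  unsigned char *fip_mac,
				  int is_iscsi)
{
	if (is_iscsi)
		memcpy(iscsi_mac, dev_addr, DEMO_ETH_ALEN);
	else
		memcpy(fip_mac, dev_addr, DEMO_ETH_ALEN);

	/* Zero the primary MAC configuration in either case. */
	memset(dev_addr, 0, DEMO_ETH_ALEN);
}

int main(void)
{
	unsigned char dev_addr[DEMO_ETH_ALEN] = { 0, 0x10, 0x18, 1, 2, 3 };
	unsigned char iscsi_mac[DEMO_ETH_ALEN] = { 0 };
	unsigned char fip_mac[DEMO_ETH_ALEN] = { 0 };

	demo_pick_storage_mac(dev_addr, iscsi_mac, fip_mac, 1);
	printf("iscsi %02x:%02x:%02x:%02x:%02x:%02x, primary zeroed=%d\n",
	       iscsi_mac[0], iscsi_mac[1], iscsi_mac[2],
	       iscsi_mac[3], iscsi_mac[4], iscsi_mac[5],
	       dev_addr[0] == 0 && dev_addr[5] == 0);
	return 0;
}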
@@ -9667,10 +9705,6 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
#ifdef BCM_CNIC
- /* Set the FCoE MAC in MF_SD mode */
- if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp))
- memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
-
/* Disable iSCSI if MAC configuration is
* invalid.
*/
@@ -9690,10 +9724,11 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
- "bad Ethernet MAC address configuration: "
- "%pM, change it manually before bringing up "
- "the appropriate network interface\n",
+ "bad Ethernet MAC address configuration: %pM\n"
+ "change it manually before bringing up the appropriate network interface\n",
bp->dev->dev_addr);
+
+
}
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
@@ -9814,8 +9849,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_config[vn] = MF_CFG_RD(bp,
func_mf_config[func].config);
} else
- BNX2X_DEV_INFO("illegal MAC address "
- "for SI\n");
+ BNX2X_DEV_INFO("illegal MAC address for SI\n");
break;
case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
/* get OV configuration */
@@ -9833,7 +9867,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
default:
/* Unknown configuration: reset mf_config */
bp->mf_config[vn] = 0;
- BNX2X_DEV_INFO("unkown MF mode 0x%x\n", val);
+ BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
}
}
@@ -9848,25 +9882,24 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_ov = val;
bp->path_has_ovlan = true;
- BNX2X_DEV_INFO("MF OV for func %d is %d "
- "(0x%04x)\n", func, bp->mf_ov,
- bp->mf_ov);
+ BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
+ func, bp->mf_ov, bp->mf_ov);
} else {
dev_err(&bp->pdev->dev,
- "No valid MF OV for func %d, "
- "aborting\n", func);
+ "No valid MF OV for func %d, aborting\n",
+ func);
return -EPERM;
}
break;
case MULTI_FUNCTION_SI:
- BNX2X_DEV_INFO("func %d is in MF "
- "switch-independent mode\n", func);
+ BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
+ func);
break;
default:
if (vn) {
dev_err(&bp->pdev->dev,
- "VN %d is in a single function mode, "
- "aborting\n", vn);
+ "VN %d is in a single function mode, aborting\n",
+ vn);
return -EPERM;
}
break;
@@ -9902,16 +9935,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bnx2x_get_cnic_info(bp);
- /* Get current FW pulse sequence */
- if (!BP_NOMCP(bp)) {
- int mb_idx = BP_FW_MB_IDX(bp);
-
- bp->fw_drv_pulse_wr_seq =
- (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
- DRV_PULSE_SEQ_MASK);
- BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
- }
-
return rc;
}
@@ -10080,35 +10103,26 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
if (!BP_NOMCP(bp))
bnx2x_undi_unload(bp);
- /* init fw_seq after undi_unload! */
- if (!BP_NOMCP(bp)) {
- bp->fw_seq =
- (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
- BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
- }
-
if (CHIP_REV_IS_FPGA(bp))
dev_err(&bp->pdev->dev, "FPGA detected\n");
if (BP_NOMCP(bp) && (func == 0))
- dev_err(&bp->pdev->dev, "MCP disabled, "
- "must load devices in order!\n");
+ dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
bp->multi_mode = multi_mode;
bp->disable_tpa = disable_tpa;
#ifdef BCM_CNIC
- bp->disable_tpa |= IS_MF_ISCSI_SD(bp);
+ bp->disable_tpa |= IS_MF_STORAGE_SD(bp);
#endif
/* Set TPA flags */
if (bp->disable_tpa) {
- bp->flags &= ~TPA_ENABLE_FLAG;
+ bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
bp->dev->features &= ~NETIF_F_LRO;
} else {
- bp->flags |= TPA_ENABLE_FLAG;
+ bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
bp->dev->features |= NETIF_F_LRO;
}
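The TPA handling above now sets and clears the TPA and GRO aggregation flags as a pair and mirrors the decision into the LRO feature bit. A tiny sketch of that flag pairing with made-up bit values:

#include <stdio.h>

/* Illustrative flag bits only, stand-ins for TPA_ENABLE_FLAG,
 * GRO_ENABLE_FLAG and NETIF_F_LRO in the hunk above. */
#define DEMO_TPA_FLAG	(1u << 0)
#define DEMO_GRO_FLAG	(1u << 1)
#define DEMO_F_LRO	(1u << 2)

/* Set or clear the two aggregation flags together and keep the feature
 * word in step with the decision. */
static void demo_set_tpa(unsigned int *flags, unsigned int *features,
			 int disable_tpa)
{
	if (disable_tpa) {
		*flags &= ~(DEMO_TPA_FLAG | DEMO_GRO_FLAG);
		*features &= ~DEMO_F_LRO;
	} else {
		*flags |= (DEMO_TPA_FLAG | DEMO_GRO_FLAG);
		*features |= DEMO_F_LRO;
	}
}

int main(void)
{
	unsigned int flags = 0, features = 0;

	demo_set_tpa(&flags, &features, 0);
	printf("enabled:  flags=%#x features=%#x\n", flags, features);
	demo_set_tpa(&flags, &features, 1);
	printf("disabled: flags=%#x features=%#x\n", flags, features);
	return 0;
}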
@@ -10150,6 +10164,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
if (CHIP_IS_E3B0(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+ bp->gro_check = bnx2x_need_gro_check(bp->dev->mtu);
+
return rc;
}
@@ -10168,14 +10184,16 @@ static int bnx2x_open(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
bool global = false;
int other_engine = BP_PATH(bp) ? 0 : 1;
- u32 other_load_counter, load_counter;
+ bool other_load_status, load_status;
+
+ bp->stats_init = true;
netif_carrier_off(dev);
bnx2x_set_power_state(bp, PCI_D0);
- other_load_counter = bnx2x_get_load_cnt(bp, other_engine);
- load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp));
+ other_load_status = bnx2x_get_load_status(bp, other_engine);
+ load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
/*
* If parity had happen during the unload, then attentions
@@ -10201,8 +10219,8 @@ static int bnx2x_open(struct net_device *dev)
* global blocks only the first in the chip should try
* to recover.
*/
- if ((!load_counter &&
- (!global || !other_load_counter)) &&
+ if ((!load_status &&
+ (!global || !other_load_status)) &&
bnx2x_trylock_leader_lock(bp) &&
!bnx2x_leader_reset(bp)) {
netdev_info(bp->dev, "Recovered in open\n");
@@ -10213,10 +10231,8 @@ static int bnx2x_open(struct net_device *dev)
bnx2x_set_power_state(bp, PCI_D3hot);
bp->recovery_state = BNX2X_RECOVERY_FAILED;
- netdev_err(bp->dev, "Recovery flow hasn't been properly"
- " completed yet. Try again later. If u still see this"
- " message after a few retries then power cycle is"
- " required.\n");
+ BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
+ "If you still see this message after a few retries then power cycle is required.\n");
return -EAGAIN;
} while (0);
@@ -10226,7 +10242,7 @@ static int bnx2x_open(struct net_device *dev)
}
/* called with rtnl_lock */
-int bnx2x_close(struct net_device *dev)
+static int bnx2x_close(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -10315,7 +10331,7 @@ static inline int bnx2x_set_uc_list(struct bnx2x *bp)
static inline int bnx2x_set_mc_list(struct bnx2x *bp)
{
struct net_device *dev = bp->dev;
- struct bnx2x_mcast_ramrod_params rparam = {0};
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
int rc = 0;
rparam.mcast_obj = &bp->mcast_obj;
@@ -10323,8 +10339,7 @@ static inline int bnx2x_set_mc_list(struct bnx2x *bp)
/* first, clear all configured multicast MACs */
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
if (rc < 0) {
- BNX2X_ERR("Failed to clear multicast "
- "configuration: %d\n", rc);
+ BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
return rc;
}
@@ -10332,8 +10347,8 @@ static inline int bnx2x_set_mc_list(struct bnx2x *bp)
if (netdev_mc_count(dev)) {
rc = bnx2x_init_mcast_macs_list(bp, &rparam);
if (rc) {
- BNX2X_ERR("Failed to create multicast MACs "
- "list: %d\n", rc);
+ BNX2X_ERR("Failed to create multicast MACs list: %d\n",
+ rc);
return rc;
}
@@ -10341,8 +10356,8 @@ static inline int bnx2x_set_mc_list(struct bnx2x *bp)
rc = bnx2x_config_mcast(bp, &rparam,
BNX2X_MCAST_CMD_ADD);
if (rc < 0)
- BNX2X_ERR("Failed to set a new multicast "
- "configuration: %d\n", rc);
+ BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
+ rc);
bnx2x_free_mcast_macs_list(&rparam);
}
@@ -10426,8 +10441,9 @@ static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
struct bnx2x *bp = netdev_priv(netdev);
int rc;
- DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
- " value 0x%x\n", prtad, devad, addr, value);
+ DP(NETIF_MSG_LINK,
+ "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
+ prtad, devad, addr, value);
/* The HW expects different devad if CL22 is used */
devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
@@ -10468,8 +10484,10 @@ static int bnx2x_validate_addr(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr))
+ if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
+ BNX2X_ERR("Non-valid Ethernet address\n");
return -EADDRNOTAVAIL;
+ }
return 0;
}
@@ -10503,8 +10521,7 @@ static inline int bnx2x_set_coherency_mask(struct bnx2x *bp)
if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
bp->flags |= USING_DAC_FLAG;
if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
- dev_err(dev, "dma_set_coherent_mask failed, "
- "aborting\n");
+ dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
return -EIO;
}
} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
@@ -10521,6 +10538,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
{
struct bnx2x *bp;
int rc;
+ u32 pci_cfg_dword;
bool chip_is_e1x = (board_type == BCM57710 ||
board_type == BCM57711 ||
board_type == BCM57711E);
@@ -10531,7 +10549,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
bp->dev = dev;
bp->pdev = pdev;
bp->flags = 0;
- bp->pf_num = PCI_FUNC(pdev->devfn);
rc = pci_enable_device(pdev);
if (rc) {
@@ -10575,7 +10592,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
}
if (!pci_is_pcie(pdev)) {
- dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
+ dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
rc = -EIO;
goto err_out_release;
}
@@ -10598,6 +10615,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
goto err_out_release;
}
+ /* In E1/E1H use pci device function given by kernel.
+ * In E2/E3 read physical function from ME register since these chips
+ * support Physical Device Assignment where kernel BDF may be arbitrary
+ * (depending on hypervisor).
+ */
+ if (chip_is_e1x)
+ bp->pf_num = PCI_FUNC(pdev->devfn);
+ else {/* chip is E2/3*/
+ pci_read_config_dword(bp->pdev,
+ PCICFG_ME_REGISTER, &pci_cfg_dword);
+ bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
+ ME_REG_ABS_PF_NUM_SHIFT);
+ }
+ BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
+
bnx2x_set_power_state(bp, PCI_D0);
/* clean indirect addresses */
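The added block above reads the physical function number from the PCI config space ME register on E2/E3 chips instead of trusting the kernel-assigned BDF. A hedged sketch of the mask-and-shift extraction; the mask and shift used here are illustrative, not the real ME_REG_ABS_PF_NUM layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative mask/shift values only; the real ME_REG_ABS_PF_NUM
 * layout lives in the driver headers, not here. */
#define DEMO_ME_REG_ABS_PF_NUM        (0x7u << 16)
#define DEMO_ME_REG_ABS_PF_NUM_SHIFT  16

/* Extract an absolute PF number from a PCI config dword the way the
 * hunk above does for E2/E3 chips. */
static uint8_t demo_pf_num(uint32_t me_reg)
{
	return (uint8_t)((me_reg & DEMO_ME_REG_ABS_PF_NUM) >>
			 DEMO_ME_REG_ABS_PF_NUM_SHIFT);
}

int main(void)
{
	uint32_t fake_cfg_dword = 0x00050000;	/* PF 5 encoded in bits 18:16 */

	printf("pf_num = %u\n", (unsigned)demo_pf_num(fake_cfg_dword));
	return 0;
}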
@@ -10627,7 +10659,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
/* Reset the load counter */
- bnx2x_clear_load_cnt(bp);
+ bnx2x_clear_load_status(bp);
dev->watchdog_timeo = TX_TIMEOUT;
@@ -10637,8 +10669,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
dev->priv_flags |= IFF_UNICAST_FLT;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_LRO |
- NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
+ NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
@@ -10697,8 +10730,10 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
int i;
const u8 *fw_ver;
- if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
+ if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
+ BNX2X_ERR("Wrong FW size\n");
return -EINVAL;
+ }
fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
sections = (struct bnx2x_fw_file_section *)fw_hdr;
@@ -10709,8 +10744,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
offset = be32_to_cpu(sections[i].offset);
len = be32_to_cpu(sections[i].len);
if (offset + len > firmware->size) {
- dev_err(&bp->pdev->dev,
- "Section %d length is out of bounds\n", i);
+ BNX2X_ERR("Section %d length is out of bounds\n", i);
return -EINVAL;
}
}
@@ -10722,8 +10756,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
if (be16_to_cpu(ops_offsets[i]) > num_ops) {
- dev_err(&bp->pdev->dev,
- "Section offset %d is out of bounds\n", i);
+ BNX2X_ERR("Section offset %d is out of bounds\n", i);
return -EINVAL;
}
}
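The firmware checks above reject images whose header sections fall outside the blob. A short sketch of the same bounds rule, written in an overflow-safe form over a hypothetical section table:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical section descriptor standing in for
 * struct bnx2x_fw_file_section (offset/len are big-endian on disk in
 * the real file; plain host integers here keep the sketch short). */
struct demo_section {
	uint32_t offset;
	uint32_t len;
};

/* Every section must lie entirely inside the blob, otherwise the image
 * is rejected. */
static int demo_check_sections(const struct demo_section *sec, int n,
			       uint32_t blob_size)
{
	for (int i = 0; i < n; i++) {
		if (sec[i].offset > blob_size ||
		    sec[i].len > blob_size - sec[i].offset) {
			fprintf(stderr, "section %d out of bounds\n", i);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct demo_section good[] = { { 0, 64 }, { 64, 32 } };
	struct demo_section bad[]  = { { 96, 64 } };

	printf("good: %d\n", demo_check_sections(good, 2, 128));
	printf("bad:  %d\n", demo_check_sections(bad, 1, 128));
	return 0;
}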
@@ -10735,10 +10768,9 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
(fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
(fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
(fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
- dev_err(&bp->pdev->dev,
- "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
- fw_ver[0], fw_ver[1], fw_ver[2],
- fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
+ BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
+ fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
+ BCM_5710_FW_MAJOR_VERSION,
BCM_5710_FW_MINOR_VERSION,
BCM_5710_FW_REVISION_VERSION,
BCM_5710_FW_ENGINEERING_VERSION);
@@ -10814,15 +10846,13 @@ static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
do { \
u32 len = be32_to_cpu(fw_hdr->arr.len); \
bp->arr = kmalloc(len, GFP_KERNEL); \
- if (!bp->arr) { \
- pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
+ if (!bp->arr) \
goto lbl; \
- } \
func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
(u8 *)bp->arr, len); \
} while (0)
-int bnx2x_init_firmware(struct bnx2x *bp)
+static int bnx2x_init_firmware(struct bnx2x *bp)
{
const char *fw_file_name;
struct bnx2x_fw_file_hdr *fw_hdr;
@@ -11054,14 +11084,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
- if (!dev) {
- dev_err(&pdev->dev, "Cannot allocate net device\n");
+ if (!dev)
return -ENOMEM;
- }
bp = netdev_priv(dev);
- DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n",
+ BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
tx_count, rx_count);
bp->igu_sb_cnt = max_non_def_sbs;
@@ -11074,7 +11102,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
return rc;
}
- DP(NETIF_MSG_DRV, "max_non_def_sbs %d\n", max_non_def_sbs);
+ BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
rc = bnx2x_init_bp(bp);
if (rc)
@@ -11129,7 +11157,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
- netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+ BNX2X_DEV_INFO(
+ "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
board_info[ent->driver_data].name,
(CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
pcie_width,
@@ -11263,29 +11292,11 @@ static void bnx2x_eeh_recover(struct bnx2x *bp)
mutex_init(&bp->port.phy_mutex);
- bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
- bp->link_params.shmem_base = bp->common.shmem_base;
- BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
-
- if (!bp->common.shmem_base ||
- (bp->common.shmem_base < 0xA0000) ||
- (bp->common.shmem_base >= 0xC0000)) {
- BNX2X_DEV_INFO("MCP not active\n");
- bp->flags |= NO_MCP_FLAG;
- return;
- }
val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
BNX2X_ERR("BAD MCP validity signature\n");
-
- if (!BP_NOMCP(bp)) {
- bp->fw_seq =
- (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
- BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
- }
}
/**
@@ -11366,8 +11377,7 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
struct bnx2x *bp = netdev_priv(dev);
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- netdev_err(bp->dev, "Handling parity error recovery. "
- "Try again later\n");
+ netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
return;
}
@@ -11518,7 +11528,7 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
spe = bnx2x_sp_get_next(bp);
*spe = *bp->cnic_kwq_cons;
- DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
+ DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
@@ -11537,10 +11547,18 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev,
int i;
#ifdef BNX2X_STOP_ON_ERROR
- if (unlikely(bp->panic))
+ if (unlikely(bp->panic)) {
+ BNX2X_ERR("Can't post to SP queue while panic\n");
return -EIO;
+ }
#endif
+ if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
+ (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
+ BNX2X_ERR("Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
spin_lock_bh(&bp->spq_lock);
for (i = 0; i < count; i++) {
@@ -11553,7 +11571,7 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev,
bp->cnic_kwq_pending++;
- DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
+ DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
spe->hdr.conn_and_cmd_data, spe->hdr.type,
spe->data.update_data_addr.hi,
spe->data.update_data_addr.lo,
@@ -11834,8 +11852,10 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
struct bnx2x *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
- if (ops == NULL)
+ if (ops == NULL) {
+ BNX2X_ERR("NULL ops received\n");
return -EINVAL;
+ }
bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!bp->cnic_kwq)
@@ -11918,8 +11938,8 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
if (NO_FCOE(bp))
cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
- DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
- "starting cid %d\n",
+ BNX2X_DEV_INFO(
+ "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
cp->ctx_blk_size,
cp->ctx_tbl_offset,
cp->ctx_tbl_len,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index dddbcf6e154e..fd7fb4581849 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1,6 +1,6 @@
/* bnx2x_reg.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -4812,6 +4812,7 @@
The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
header pointer. */
#define UCM_REG_XX_TABLE 0xe0300
+#define UMAC_COMMAND_CONFIG_REG_HD_ENA (0x1<<10)
#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28)
#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15)
#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24)
@@ -5731,6 +5732,7 @@
#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
#define MISC_REGISTERS_GPIO_SET_POS 8
#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_DORQ (0x1<<19)
#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
@@ -5783,15 +5785,17 @@
#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1
#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
#define MISC_REGISTERS_SPIO_SET_POS 8
-#define HW_LOCK_DRV_FLAGS 10
#define HW_LOCK_MAX_RESOURCE_VALUE 31
+#define HW_LOCK_RESOURCE_DRV_FLAGS 10
#define HW_LOCK_RESOURCE_GPIO 1
#define HW_LOCK_RESOURCE_MDIO 0
+#define HW_LOCK_RESOURCE_NVRAM 12
#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
-#define HW_LOCK_RESOURCE_SPIO 2
+#define HW_LOCK_RESOURCE_RECOVERY_REG 11
#define HW_LOCK_RESOURCE_RESET 5
+#define HW_LOCK_RESOURCE_SPIO 2
#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
@@ -6023,7 +6027,8 @@
#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23)
#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24)
#define PCICFG_GRC_ADDRESS 0x78
-#define PCICFG_GRC_DATA 0x80
+#define PCICFG_GRC_DATA 0x80
+#define PCICFG_ME_REGISTER 0x98
#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0
#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16)
#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27)
@@ -6401,6 +6406,7 @@
#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV2 0x04
#define MDIO_REG_BANK_RX0 0x80b0
#define MDIO_RX0_RX_STATUS 0x10
@@ -6794,14 +6800,16 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00
#define MDIO_AN_REG_ADV 0x0011
#define MDIO_AN_REG_ADV2 0x0012
-#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
+#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
+#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014
#define MDIO_AN_REG_MASTER_STATUS 0x0021
/*bcm*/
#define MDIO_AN_REG_LINK_STATUS 0x8304
#define MDIO_AN_REG_CL37_CL73 0x8370
#define MDIO_AN_REG_CL37_AN 0xffe0
#define MDIO_AN_REG_CL37_FC_LD 0xffe4
-#define MDIO_AN_REG_CL37_FC_LP 0xffe5
+#define MDIO_AN_REG_CL37_FC_LP 0xffe5
+#define MDIO_AN_REG_1000T_STATUS 0xffea
#define MDIO_AN_REG_8073_2_5G 0x8329
#define MDIO_AN_REG_8073_BAM 0x8350
@@ -6966,6 +6974,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308
#define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309
#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
+#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c
#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 94110e9ce51d..3f52fadee3ed 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -1,6 +1,6 @@
/* bnx2x_sp.c: Broadcom Everest network driver.
*
- * Copyright 2011 Broadcom Corporation
+ * Copyright (c) 2011-2012 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -72,8 +72,8 @@ static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
o->execute = exec;
o->get = get;
- DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
- "length of %d\n", exe_len);
+ DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
+ exe_len);
}
static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
@@ -203,8 +203,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
*/
if (!list_empty(&o->pending_comp)) {
if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
- DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
- "resetting pending_comp\n");
+ DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
__bnx2x_exe_queue_reset_pending(bp, o);
} else {
spin_unlock_bh(&o->lock);
@@ -476,11 +475,14 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
}
/* check_add() callbacks */
-static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
+static int bnx2x_check_mac_add(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
{
struct bnx2x_vlan_mac_registry_elem *pos;
+ DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
+
if (!is_valid_ether_addr(data->mac.mac))
return -EINVAL;
@@ -492,11 +494,14 @@ static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
return 0;
}
-static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
+static int bnx2x_check_vlan_add(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
{
struct bnx2x_vlan_mac_registry_elem *pos;
+ DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
+
list_for_each_entry(pos, &o->head, link)
if (data->vlan.vlan == pos->u.vlan.vlan)
return -EEXIST;
@@ -504,11 +509,15 @@ static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
return 0;
}
-static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
+static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
{
struct bnx2x_vlan_mac_registry_elem *pos;
+ DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
+ data->vlan_mac.mac, data->vlan_mac.vlan);
+
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
@@ -521,11 +530,14 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
- bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
+ bnx2x_check_mac_del(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
{
struct bnx2x_vlan_mac_registry_elem *pos;
+ DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
+
list_for_each_entry(pos, &o->head, link)
if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
return pos;
@@ -534,11 +546,14 @@ static struct bnx2x_vlan_mac_registry_elem *
}
static struct bnx2x_vlan_mac_registry_elem *
- bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
+ bnx2x_check_vlan_del(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
{
struct bnx2x_vlan_mac_registry_elem *pos;
+ DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
+
list_for_each_entry(pos, &o->head, link)
if (data->vlan.vlan == pos->u.vlan.vlan)
return pos;
@@ -547,11 +562,15 @@ static struct bnx2x_vlan_mac_registry_elem *
}
static struct bnx2x_vlan_mac_registry_elem *
- bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
+ bnx2x_check_vlan_mac_del(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
{
struct bnx2x_vlan_mac_registry_elem *pos;
+ DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
+ data->vlan_mac.mac, data->vlan_mac.vlan);
+
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
@@ -562,7 +581,8 @@ static struct bnx2x_vlan_mac_registry_elem *
}
/* check_move() callback */
-static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
+static bool bnx2x_check_move(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *src_o,
struct bnx2x_vlan_mac_obj *dst_o,
union bnx2x_classification_ramrod_data *data)
{
@@ -572,10 +592,10 @@ static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
/* Check if we can delete the requested configuration from the first
* object.
*/
- pos = src_o->check_del(src_o, data);
+ pos = src_o->check_del(bp, src_o, data);
/* check if configuration can be added */
- rc = dst_o->check_add(dst_o, data);
+ rc = dst_o->check_add(bp, dst_o, data);
/* If this classification can not be added (is already set)
* or can't be deleted - return an error.
@@ -587,6 +607,7 @@ static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
}
static bool bnx2x_check_move_always_err(
+ struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *src_o,
struct bnx2x_vlan_mac_obj *dst_o,
union bnx2x_classification_ramrod_data *data)
@@ -611,12 +632,6 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
return rx_tx_flag;
}
-/* LLH CAM line allocations */
-enum {
- LLH_CAM_ISCSI_ETH_LINE = 0,
- LLH_CAM_ETH_LINE,
- LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
-};
static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
bool add, unsigned char *dev_addr, int index)
@@ -625,7 +640,7 @@ static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
NIG_REG_LLH0_FUNC_MEM;
- if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
+ if (!IS_MF_SI(bp) || index > BNX2X_LLH_CAM_MAX_PF_LINE)
return;
DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
@@ -731,9 +746,10 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
if (cmd != BNX2X_VLAN_MAC_MOVE) {
if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
bnx2x_set_mac_in_nig(bp, add, mac,
- LLH_CAM_ISCSI_ETH_LINE);
+ BNX2X_LLH_CAM_ISCSI_ETH_LINE);
else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
- bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
+ bnx2x_set_mac_in_nig(bp, add, mac,
+ BNX2X_LLH_CAM_ETH_LINE);
}
/* Reset the ramrod data buffer for the first rule */
@@ -745,7 +761,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
&rule_entry->mac.header);
DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
- add ? "add" : "delete", mac, raw->cl_id);
+ (add ? "add" : "delete"), mac, raw->cl_id);
/* Set a MAC itself */
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
@@ -838,7 +854,7 @@ static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
cfg_entry);
DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
- add ? "setting" : "clearing",
+ (add ? "setting" : "clearing"),
mac, raw->cl_id, cam_offset);
}
@@ -869,7 +885,7 @@ static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
/* Reset the ramrod data buffer */
memset(config, 0, sizeof(*config));
- bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
+ bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
cam_offset, add,
elem->cmd_data.vlan_mac.u.mac.mac, 0,
ETH_VLAN_FILTER_ANY_VLAN, config);
@@ -1157,10 +1173,9 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
int rc;
/* Check the registry */
- rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
+ rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
if (rc) {
- DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
- "current registry state\n");
+ DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
return rc;
}
@@ -1211,10 +1226,9 @@ static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
/* If this classification can not be deleted (doesn't exist)
* - return a BNX2X_EXIST.
*/
- pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+ pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
if (!pos) {
- DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
- "current registry state\n");
+ DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
return -EEXIST;
}
@@ -1274,9 +1288,9 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
* Check if we can perform this operation based on the current registry
* state.
*/
- if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
- DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
- "current registry state\n");
+ if (!src_o->check_move(bp, src_o, dest_o,
+ &elem->cmd_data.vlan_mac.u)) {
+ DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
return -EINVAL;
}
@@ -1290,8 +1304,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
/* Check DEL on source */
query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
if (src_exeq->get(src_exeq, &query_elem)) {
- BNX2X_ERR("There is a pending DEL command on the source "
- "queue already\n");
+ BNX2X_ERR("There is a pending DEL command on the source queue already\n");
return -EINVAL;
}
@@ -1304,8 +1317,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
/* Check ADD on destination */
query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
if (dest_exeq->get(dest_exeq, &query_elem)) {
- BNX2X_ERR("There is a pending ADD command on the "
- "destination queue already\n");
+ BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
return -EINVAL;
}
@@ -1480,12 +1492,10 @@ static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
&pos->cmd_data.vlan_mac.vlan_mac_flags)) {
if ((query.cmd_data.vlan_mac.cmd ==
BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
- BNX2X_ERR("Failed to return the credit for the "
- "optimized ADD command\n");
+ BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
return -EINVAL;
} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
- BNX2X_ERR("Failed to recover the credit from "
- "the optimized DEL command\n");
+ BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
return -EINVAL;
}
}
@@ -1551,7 +1561,7 @@ static inline int bnx2x_vlan_mac_get_registry_elem(
reg_elem->vlan_mac_flags =
elem->cmd_data.vlan_mac.vlan_mac_flags;
} else /* DEL, RESTORE */
- reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+ reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
*re = reg_elem;
return 0;
@@ -1649,7 +1659,8 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
cmd = elem->cmd_data.vlan_mac.cmd;
if ((cmd == BNX2X_VLAN_MAC_DEL) ||
(cmd == BNX2X_VLAN_MAC_MOVE)) {
- reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+ reg_elem = o->check_del(bp, o,
+ &elem->cmd_data.vlan_mac.u);
WARN_ON(!reg_elem);
@@ -1680,7 +1691,7 @@ error_exit:
if (!restore &&
((cmd == BNX2X_VLAN_MAC_ADD) ||
(cmd == BNX2X_VLAN_MAC_MOVE))) {
- reg_elem = o->check_del(cam_obj,
+ reg_elem = o->check_del(bp, cam_obj,
&elem->cmd_data.vlan_mac.u);
if (reg_elem) {
list_del(&reg_elem->link);
@@ -1755,8 +1766,7 @@ int bnx2x_config_vlan_mac(
rc = 1;
if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
- DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
- "clearing a pending bit.\n");
+ DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
raw->clear_pending(raw);
}
@@ -1836,6 +1846,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
rc = exeq->remove(bp, exeq->owner, exeq_pos);
if (rc) {
BNX2X_ERR("Failed to remove command\n");
+ spin_unlock_bh(&exeq->lock);
return rc;
}
list_del(&exeq_pos->link);
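The one-line fix above releases the execution-queue spinlock on the error return, so that every path out of the loop drops the lock it took. A userspace sketch of the same lock-balance rule using a pthread mutex and hypothetical helpers:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical remove step that can fail, like exeq->remove() above. */
static int demo_remove(int key)
{
	return key < 0 ? -1 : 0;
}

/* Once the lock is taken, every return path, including the error path,
 * must drop it, or the next caller deadlocks. */
static int demo_del_all(const int *keys, int n)
{
	int rc = 0;

	pthread_mutex_lock(&lock);
	for (int i = 0; i < n; i++) {
		rc = demo_remove(keys[i]);
		if (rc) {
			pthread_mutex_unlock(&lock);	/* the added unlock */
			return rc;
		}
	}
	pthread_mutex_unlock(&lock);
	return rc;
}

int main(void)
{
	int good[] = { 1, 2, 3 }, bad[] = { 1, -2, 3 };

	printf("good: %d\n", demo_del_all(good, 3));
	printf("bad:  %d\n", demo_del_all(bad, 3));
	return 0;
}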
@@ -2153,12 +2164,10 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
mac_filters->unmatched_unicast & ~mask;
DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
- "accp_mcast 0x%x\naccp_bcast 0x%x\n",
- mac_filters->ucast_drop_all,
- mac_filters->mcast_drop_all,
- mac_filters->ucast_accept_all,
- mac_filters->mcast_accept_all,
- mac_filters->bcast_accept_all);
+ "accp_mcast 0x%x\naccp_bcast 0x%x\n",
+ mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
+ mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
+ mac_filters->bcast_accept_all);
/* write the MAC filter structure*/
__storm_memset_mac_filters(bp, mac_filters, p->func_id);
@@ -2307,8 +2316,7 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
*/
bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
- DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
- "tx_accept_flags 0x%lx\n",
+ DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
data->header.rule_cnt, p->rx_accept_flags,
p->tx_accept_flags);
@@ -2441,8 +2449,8 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
if (!new_cmd)
return -ENOMEM;
- DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
- "macs_list_len=%d\n", cmd, macs_list_len);
+ DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
+ cmd, macs_list_len);
INIT_LIST_HEAD(&new_cmd->data.macs_head);
@@ -2657,7 +2665,7 @@ static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
cnt++;
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- pmac_pos->mac);
+ pmac_pos->mac);
list_del(&pmac_pos->link);
@@ -3181,8 +3189,8 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
* matter.
*/
if (p->mcast_list_len > o->max_cmd_len) {
- BNX2X_ERR("Can't configure more than %d multicast MACs"
- "on 57710\n", o->max_cmd_len);
+ BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
+ o->max_cmd_len);
return -EINVAL;
}
/* Every configured MAC should be cleared if DEL command is
@@ -3430,7 +3438,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
&data->config_table[i].lsb_mac_addr,
elem->mac);
DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
- elem->mac);
+ elem->mac);
list_add_tail(&elem->link,
&o->registry.exact_match.macs);
}
@@ -3571,9 +3579,8 @@ int bnx2x_config_mcast(struct bnx2x *bp,
if ((!p->mcast_list_len) && (!o->check_sched(o)))
return 0;
- DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
- "o->max_cmd_len=%d\n", o->total_pending_num,
- p->mcast_list_len, o->max_cmd_len);
+ DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
+ o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
/* Enqueue the current command to the pending list if we can't complete
* it in the current iteration
@@ -4298,9 +4305,8 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
unsigned long cur_pending = o->pending;
if (!test_and_clear_bit(cmd, &cur_pending)) {
- BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
- "pending 0x%lx, next_state %d\n", cmd,
- o->cids[BNX2X_PRIMARY_CID_INDEX],
+ BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
+ cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
o->state, cur_pending, o->next_state);
return -EINVAL;
}
@@ -4312,13 +4318,13 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
o->next_tx_only, o->max_cos);
- DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
- "setting state to %d\n", cmd,
- o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
+ DP(BNX2X_MSG_SP,
+ "Completing command %d for queue %d, setting state to %d\n",
+ cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
if (o->next_tx_only) /* print num tx-only if any exist */
DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
- o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
+ o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
o->state = o->next_state;
o->num_tx_only = o->next_tx_only;
@@ -4430,9 +4436,10 @@ static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
struct client_init_rx_data *rx_data,
unsigned long *flags)
{
- /* Rx data */
rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
+ rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
+ CLIENT_INIT_RX_DATA_TPA_MODE;
rx_data->vmqueue_mode_en_flg = 0;
rx_data->cache_line_alignment_log_size =
@@ -4476,7 +4483,7 @@ static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
- rx_data->approx_mcast_engine_id = o->func_id;
+ rx_data->approx_mcast_engine_id = params->mcast_engine_id;
rx_data->is_approx_mcast = 1;
}
@@ -4532,8 +4539,10 @@ static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
&data->tx,
&cmd_params->params.tx_only.flags);
- DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",cmd_params->q_obj->cids[0],
- data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
+ DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
+ cmd_params->q_obj->cids[0],
+ data->tx.tx_bd_page_base.lo,
+ data->tx.tx_bd_page_base.hi);
}
/**
@@ -4680,10 +4689,8 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
/* Fill the ramrod data */
bnx2x_q_fill_setup_tx_only(bp, params, rdata);
- DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d,"
- "sp-client id %d, cos %d\n",
- o->cids[cid_index],
- rdata->general.client_id,
+ DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
+ o->cids[cid_index], rdata->general.client_id,
rdata->general.sp_client_id, rdata->general.cos);
/*
@@ -5184,13 +5191,6 @@ void bnx2x_init_queue_obj(struct bnx2x *bp,
obj->set_pending = bnx2x_queue_set_pending;
}
-void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
- struct bnx2x_queue_sp_obj *obj,
- u32 cid, u8 index)
-{
- obj->cids[index] = cid;
-}
-
/********************** Function state object *********************************/
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
struct bnx2x_func_sp_obj *o)
@@ -5232,9 +5232,9 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
unsigned long cur_pending = o->pending;
if (!test_and_clear_bit(cmd, &cur_pending)) {
- BNX2X_ERR("Bad MC reply %d for func %d in state %d "
- "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
- o->state, cur_pending, o->next_state);
+ BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
+ cmd, BP_FUNC(bp), o->state,
+ cur_pending, o->next_state);
return -EINVAL;
}
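
The completion handlers in this file share one pattern: issuing a ramrod sets the command's bit in the object's pending mask, and the completion path must test-and-clear that exact bit or the reply is rejected as a bad MC reply. A rough user-space sketch of that handshake, with hand-rolled helpers standing in for the kernel's set_bit()/test_and_clear_bit() (all names here are illustrative, not driver API):

#include <stdio.h>

/* stand-ins for the kernel's set_bit()/test_and_clear_bit() */
static void set_bit_ul(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static int test_and_clear_bit_ul(int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << nr;
	int was_set = (*addr & mask) != 0;

	*addr &= ~mask;
	return was_set;
}

/* issue side: remember which command we are waiting for */
static void issue_cmd(unsigned long *pending, int cmd)
{
	set_bit_ul(cmd, pending);
}

/* completion side: only a command we actually issued may complete */
static int complete_cmd(unsigned long *pending, int cmd)
{
	if (!test_and_clear_bit_ul(cmd, pending)) {
		fprintf(stderr, "bad reply %d, pending 0x%lx\n", cmd, *pending);
		return -1;	/* the driver returns -EINVAL here */
	}
	return 0;
}

int main(void)
{
	unsigned long pending = 0;

	issue_cmd(&pending, 3);
	printf("expected completion: %d\n", complete_cmd(&pending, 3));
	printf("unexpected completion: %d\n", complete_cmd(&pending, 5));
	return 0;
}
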
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 66da39f0c84a..61a7670adfcd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1,6 +1,6 @@
/* bnx2x_sp.h: Broadcom Everest network driver.
*
- * Copyright 2011 Broadcom Corporation
+ * Copyright (c) 2011-2012 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -315,7 +315,8 @@ struct bnx2x_vlan_mac_obj {
* @return zero if the element may be added
*/
- int (*check_add)(struct bnx2x_vlan_mac_obj *o,
+ int (*check_add)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data);
/**
@@ -324,7 +325,8 @@ struct bnx2x_vlan_mac_obj {
* @return true if the element may be deleted
*/
struct bnx2x_vlan_mac_registry_elem *
- (*check_del)(struct bnx2x_vlan_mac_obj *o,
+ (*check_del)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data);
/**
@@ -332,7 +334,8 @@ struct bnx2x_vlan_mac_obj {
*
* @return true if the element may be moved
*/
- bool (*check_move)(struct bnx2x_vlan_mac_obj *src_o,
+ bool (*check_move)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *src_o,
struct bnx2x_vlan_mac_obj *dst_o,
union bnx2x_classification_ramrod_data *data);
@@ -423,6 +426,13 @@ struct bnx2x_vlan_mac_obj {
int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o);
};
+enum {
+ BNX2X_LLH_CAM_ISCSI_ETH_LINE = 0,
+ BNX2X_LLH_CAM_ETH_LINE,
+ BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
+};
+
+
/** RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
/* RX_MODE ramrod special flags: set in rx_mode_flags field in
@@ -774,6 +784,7 @@ enum bnx2x_queue_cmd {
enum {
BNX2X_Q_FLG_TPA,
BNX2X_Q_FLG_TPA_IPV6,
+ BNX2X_Q_FLG_TPA_GRO,
BNX2X_Q_FLG_STATS,
BNX2X_Q_FLG_ZERO_STATS,
BNX2X_Q_FLG_ACTIVE,
@@ -803,10 +814,10 @@ enum bnx2x_q_type {
};
#define BNX2X_PRIMARY_CID_INDEX 0
-#define BNX2X_MULTI_TX_COS_E1X 1
+#define BNX2X_MULTI_TX_COS_E1X 3 /* QM only */
#define BNX2X_MULTI_TX_COS_E2_E3A0 2
#define BNX2X_MULTI_TX_COS_E3B0 3
-#define BNX2X_MULTI_TX_COS BNX2X_MULTI_TX_COS_E3B0
+#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */
struct bnx2x_queue_init_params {
@@ -889,6 +900,9 @@ struct bnx2x_rxq_setup_params {
u8 max_tpa_queues;
u8 rss_engine_id;
+ /* valid iff BNX2X_Q_FLG_MCAST */
+ u8 mcast_engine_id;
+
u8 cache_line_log;
u8 sb_cq_index;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index a766b25eec5f..e1c9310fb07c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1,6 +1,6 @@
/* bnx2x_stats.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -75,7 +75,7 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
bp->fw_stats_req->hdr.drv_stats_counter =
cpu_to_le16(bp->stats_counter++);
- DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n",
+ DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
bp->fw_stats_req->hdr.drv_stats_counter);
@@ -128,6 +128,8 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
} else if (bp->func_stx) {
*stats_comp = 0;
+ memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
+ sizeof(bp->func_stats));
bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
}
}
@@ -161,7 +163,7 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
u32 *stats_comp = bnx2x_sp(bp, stats_comp);
/* sanity */
- if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) {
+ if (!bp->port.pmf || !bp->port.port_stx) {
BNX2X_ERR("BUG!\n");
return;
}
@@ -626,31 +628,30 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp)
tx_stat_dot3statsinternalmactransmiterrors);
ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
- ADD_64(estats->etherstatspkts1024octetsto1522octets_hi,
- new->stats_tx.tx_gt1518_hi,
- estats->etherstatspkts1024octetsto1522octets_lo,
- new->stats_tx.tx_gt1518_lo);
+ estats->etherstatspkts1024octetsto1522octets_hi =
+ pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
+ estats->etherstatspkts1024octetsto1522octets_lo =
+ pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;
- ADD_64(estats->etherstatspktsover1522octets_hi,
- new->stats_tx.tx_gt2047_hi,
- estats->etherstatspktsover1522octets_lo,
- new->stats_tx.tx_gt2047_lo);
+ estats->etherstatspktsover1522octets_hi =
+ pstats->mac_stx[1].tx_stat_mac_2047_hi;
+ estats->etherstatspktsover1522octets_lo =
+ pstats->mac_stx[1].tx_stat_mac_2047_lo;
ADD_64(estats->etherstatspktsover1522octets_hi,
- new->stats_tx.tx_gt4095_hi,
+ pstats->mac_stx[1].tx_stat_mac_4095_hi,
estats->etherstatspktsover1522octets_lo,
- new->stats_tx.tx_gt4095_lo);
+ pstats->mac_stx[1].tx_stat_mac_4095_lo);
ADD_64(estats->etherstatspktsover1522octets_hi,
- new->stats_tx.tx_gt9216_hi,
+ pstats->mac_stx[1].tx_stat_mac_9216_hi,
estats->etherstatspktsover1522octets_lo,
- new->stats_tx.tx_gt9216_lo);
-
+ pstats->mac_stx[1].tx_stat_mac_9216_lo);
ADD_64(estats->etherstatspktsover1522octets_hi,
- new->stats_tx.tx_gt16383_hi,
+ pstats->mac_stx[1].tx_stat_mac_16383_hi,
estats->etherstatspktsover1522octets_lo,
- new->stats_tx.tx_gt16383_lo);
+ pstats->mac_stx[1].tx_stat_mac_16383_lo);
estats->pause_frames_received_hi =
pstats->mac_stx[1].rx_stat_mac_xpf_hi;
@@ -803,8 +804,9 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
&bp->fw_stats_data->port.tstorm_port_statistics;
struct tstorm_per_pf_stats *tfunc =
&bp->fw_stats_data->pf.tstorm_pf_statistics;
- struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
+ struct host_func_stats *fstats = &bp->func_stats;
struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
int i;
u16 cur_stats_counter;
@@ -818,48 +820,35 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
/* are storm stats valid? */
if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
- DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
- " xstorm counter (0x%x) != stats_counter (0x%x)\n",
+ DP(BNX2X_MSG_STATS,
+ "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n",
le16_to_cpu(counters->xstats_counter), bp->stats_counter);
return -EAGAIN;
}
if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
- DP(BNX2X_MSG_STATS, "stats not updated by ustorm"
- " ustorm counter (0x%x) != stats_counter (0x%x)\n",
+ DP(BNX2X_MSG_STATS,
+ "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n",
le16_to_cpu(counters->ustats_counter), bp->stats_counter);
return -EAGAIN;
}
if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
- DP(BNX2X_MSG_STATS, "stats not updated by cstorm"
- " cstorm counter (0x%x) != stats_counter (0x%x)\n",
+ DP(BNX2X_MSG_STATS,
+ "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n",
le16_to_cpu(counters->cstats_counter), bp->stats_counter);
return -EAGAIN;
}
if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
- DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
- " tstorm counter (0x%x) != stats_counter (0x%x)\n",
+ DP(BNX2X_MSG_STATS,
+ "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n",
le16_to_cpu(counters->tstats_counter), bp->stats_counter);
return -EAGAIN;
}
- memcpy(&(fstats->total_bytes_received_hi),
- &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
- sizeof(struct host_func_stats) - 2*sizeof(u32));
estats->error_bytes_received_hi = 0;
estats->error_bytes_received_lo = 0;
- estats->etherstatsoverrsizepkts_hi = 0;
- estats->etherstatsoverrsizepkts_lo = 0;
- estats->no_buff_discard_hi = 0;
- estats->no_buff_discard_lo = 0;
- estats->total_tpa_aggregations_hi = 0;
- estats->total_tpa_aggregations_lo = 0;
- estats->total_tpa_aggregated_frames_hi = 0;
- estats->total_tpa_aggregated_frames_lo = 0;
- estats->total_tpa_bytes_hi = 0;
- estats->total_tpa_bytes_lo = 0;
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -876,29 +865,22 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
xstorm_queue_statistics;
struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+
u32 diff;
- DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, "
- "bcast_sent 0x%x mcast_sent 0x%x\n",
+ DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
i, xclient->ucast_pkts_sent,
xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);
DP(BNX2X_MSG_STATS, "---------------\n");
- qstats->total_broadcast_bytes_received_hi =
- le32_to_cpu(tclient->rcv_bcast_bytes.hi);
- qstats->total_broadcast_bytes_received_lo =
- le32_to_cpu(tclient->rcv_bcast_bytes.lo);
-
- qstats->total_multicast_bytes_received_hi =
- le32_to_cpu(tclient->rcv_mcast_bytes.hi);
- qstats->total_multicast_bytes_received_lo =
- le32_to_cpu(tclient->rcv_mcast_bytes.lo);
-
- qstats->total_unicast_bytes_received_hi =
- le32_to_cpu(tclient->rcv_ucast_bytes.hi);
- qstats->total_unicast_bytes_received_lo =
- le32_to_cpu(tclient->rcv_ucast_bytes.lo);
+ UPDATE_QSTAT(tclient->rcv_bcast_bytes,
+ total_broadcast_bytes_received);
+ UPDATE_QSTAT(tclient->rcv_mcast_bytes,
+ total_multicast_bytes_received);
+ UPDATE_QSTAT(tclient->rcv_ucast_bytes,
+ total_unicast_bytes_received);
/*
* sum to total_bytes_received all
@@ -931,9 +913,9 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
total_multicast_packets_received);
UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
total_broadcast_packets_received);
- UPDATE_EXTEND_TSTAT(pkts_too_big_discard,
- etherstatsoverrsizepkts);
- UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
+ UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
+ etherstatsoverrsizepkts);
+ UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard);
SUB_EXTEND_USTAT(ucast_no_buff_pkts,
total_unicast_packets_received);
@@ -941,24 +923,17 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
total_multicast_packets_received);
SUB_EXTEND_USTAT(bcast_no_buff_pkts,
total_broadcast_packets_received);
- UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
- UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
- UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
-
- qstats->total_broadcast_bytes_transmitted_hi =
- le32_to_cpu(xclient->bcast_bytes_sent.hi);
- qstats->total_broadcast_bytes_transmitted_lo =
- le32_to_cpu(xclient->bcast_bytes_sent.lo);
-
- qstats->total_multicast_bytes_transmitted_hi =
- le32_to_cpu(xclient->mcast_bytes_sent.hi);
- qstats->total_multicast_bytes_transmitted_lo =
- le32_to_cpu(xclient->mcast_bytes_sent.lo);
-
- qstats->total_unicast_bytes_transmitted_hi =
- le32_to_cpu(xclient->ucast_bytes_sent.hi);
- qstats->total_unicast_bytes_transmitted_lo =
- le32_to_cpu(xclient->ucast_bytes_sent.lo);
+ UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
+ UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
+ UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);
+
+ UPDATE_QSTAT(xclient->bcast_bytes_sent,
+ total_broadcast_bytes_transmitted);
+ UPDATE_QSTAT(xclient->mcast_bytes_sent,
+ total_multicast_bytes_transmitted);
+ UPDATE_QSTAT(xclient->ucast_bytes_sent,
+ total_unicast_bytes_transmitted);
+
/*
* sum to total_bytes_transmitted all
* unicast/multicast/broadcast
@@ -994,110 +969,54 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
total_transmitted_dropped_packets_error);
/* TPA aggregations completed */
- UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations);
+ UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
/* Number of network frames aggregated by TPA */
- UPDATE_EXTEND_USTAT(coalesced_pkts,
- total_tpa_aggregated_frames);
+ UPDATE_EXTEND_E_USTAT(coalesced_pkts,
+ total_tpa_aggregated_frames);
/* Total number of bytes in completed TPA aggregations */
- qstats->total_tpa_bytes_lo =
- le32_to_cpu(uclient->coalesced_bytes.lo);
- qstats->total_tpa_bytes_hi =
- le32_to_cpu(uclient->coalesced_bytes.hi);
-
- /* TPA stats per-function */
- ADD_64(estats->total_tpa_aggregations_hi,
- qstats->total_tpa_aggregations_hi,
- estats->total_tpa_aggregations_lo,
- qstats->total_tpa_aggregations_lo);
- ADD_64(estats->total_tpa_aggregated_frames_hi,
- qstats->total_tpa_aggregated_frames_hi,
- estats->total_tpa_aggregated_frames_lo,
- qstats->total_tpa_aggregated_frames_lo);
- ADD_64(estats->total_tpa_bytes_hi,
- qstats->total_tpa_bytes_hi,
- estats->total_tpa_bytes_lo,
- qstats->total_tpa_bytes_lo);
-
- ADD_64(fstats->total_bytes_received_hi,
- qstats->total_bytes_received_hi,
- fstats->total_bytes_received_lo,
- qstats->total_bytes_received_lo);
- ADD_64(fstats->total_bytes_transmitted_hi,
- qstats->total_bytes_transmitted_hi,
- fstats->total_bytes_transmitted_lo,
- qstats->total_bytes_transmitted_lo);
- ADD_64(fstats->total_unicast_packets_received_hi,
- qstats->total_unicast_packets_received_hi,
- fstats->total_unicast_packets_received_lo,
- qstats->total_unicast_packets_received_lo);
- ADD_64(fstats->total_multicast_packets_received_hi,
- qstats->total_multicast_packets_received_hi,
- fstats->total_multicast_packets_received_lo,
- qstats->total_multicast_packets_received_lo);
- ADD_64(fstats->total_broadcast_packets_received_hi,
- qstats->total_broadcast_packets_received_hi,
- fstats->total_broadcast_packets_received_lo,
- qstats->total_broadcast_packets_received_lo);
- ADD_64(fstats->total_unicast_packets_transmitted_hi,
- qstats->total_unicast_packets_transmitted_hi,
- fstats->total_unicast_packets_transmitted_lo,
- qstats->total_unicast_packets_transmitted_lo);
- ADD_64(fstats->total_multicast_packets_transmitted_hi,
- qstats->total_multicast_packets_transmitted_hi,
- fstats->total_multicast_packets_transmitted_lo,
- qstats->total_multicast_packets_transmitted_lo);
- ADD_64(fstats->total_broadcast_packets_transmitted_hi,
- qstats->total_broadcast_packets_transmitted_hi,
- fstats->total_broadcast_packets_transmitted_lo,
- qstats->total_broadcast_packets_transmitted_lo);
- ADD_64(fstats->valid_bytes_received_hi,
- qstats->valid_bytes_received_hi,
- fstats->valid_bytes_received_lo,
- qstats->valid_bytes_received_lo);
-
- ADD_64(estats->etherstatsoverrsizepkts_hi,
- qstats->etherstatsoverrsizepkts_hi,
- estats->etherstatsoverrsizepkts_lo,
- qstats->etherstatsoverrsizepkts_lo);
- ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
- estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
+ UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);
+
+ UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);
+
+ UPDATE_FSTAT_QSTAT(total_bytes_received);
+ UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
+ UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
+ UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
+ UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
+ UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
+ UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
+ UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
+ UPDATE_FSTAT_QSTAT(valid_bytes_received);
}
- ADD_64(fstats->total_bytes_received_hi,
+ ADD_64(estats->total_bytes_received_hi,
estats->rx_stat_ifhcinbadoctets_hi,
- fstats->total_bytes_received_lo,
+ estats->total_bytes_received_lo,
estats->rx_stat_ifhcinbadoctets_lo);
- ADD_64(fstats->total_bytes_received_hi,
+ ADD_64(estats->total_bytes_received_hi,
le32_to_cpu(tfunc->rcv_error_bytes.hi),
- fstats->total_bytes_received_lo,
+ estats->total_bytes_received_lo,
le32_to_cpu(tfunc->rcv_error_bytes.lo));
- memcpy(estats, &(fstats->total_bytes_received_hi),
- sizeof(struct host_func_stats) - 2*sizeof(u32));
-
ADD_64(estats->error_bytes_received_hi,
le32_to_cpu(tfunc->rcv_error_bytes.hi),
estats->error_bytes_received_lo,
le32_to_cpu(tfunc->rcv_error_bytes.lo));
- ADD_64(estats->etherstatsoverrsizepkts_hi,
- estats->rx_stat_dot3statsframestoolong_hi,
- estats->etherstatsoverrsizepkts_lo,
- estats->rx_stat_dot3statsframestoolong_lo);
+ UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
+
ADD_64(estats->error_bytes_received_hi,
estats->rx_stat_ifhcinbadoctets_hi,
estats->error_bytes_received_lo,
estats->rx_stat_ifhcinbadoctets_lo);
if (bp->port.pmf) {
- estats->mac_filter_discard =
- le32_to_cpu(tport->mac_filter_discard);
- estats->mf_tag_discard =
- le32_to_cpu(tport->mf_tag_discard);
- estats->brb_truncate_discard =
- le32_to_cpu(tport->brb_truncate_discard);
- estats->mac_discard = le32_to_cpu(tport->mac_discard);
+ struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
+ UPDATE_FW_STAT(mac_filter_discard);
+ UPDATE_FW_STAT(mf_tag_discard);
+ UPDATE_FW_STAT(brb_truncate_discard);
+ UPDATE_FW_STAT(mac_discard);
}
fstats->host_func_stats_start = ++fstats->host_func_stats_end;
@@ -1131,7 +1050,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
tmp = estats->mac_discard;
for_each_rx_queue(bp, i)
tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
- nstats->rx_dropped = tmp;
+ nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
nstats->tx_dropped = 0;
@@ -1179,17 +1098,15 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
struct bnx2x_eth_stats *estats = &bp->eth_stats;
int i;
- estats->driver_xoff = 0;
- estats->rx_err_discard_pkt = 0;
- estats->rx_skb_alloc_failed = 0;
- estats->hw_csum_err = 0;
for_each_queue(bp, i) {
struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old =
+ &bp->fp[i].eth_q_stats_old;
- estats->driver_xoff += qstats->driver_xoff;
- estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
- estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
- estats->hw_csum_err += qstats->hw_csum_err;
+ UPDATE_ESTAT_QSTAT(driver_xoff);
+ UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
+ UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
+ UPDATE_ESTAT_QSTAT(hw_csum_err);
}
}
@@ -1231,51 +1148,9 @@ static void bnx2x_stats_update(struct bnx2x *bp)
if (netif_msg_timer(bp)) {
struct bnx2x_eth_stats *estats = &bp->eth_stats;
- int i, cos;
netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
estats->brb_drop_lo, estats->brb_truncate_lo);
-
- for_each_eth_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
-
- pr_debug("%s: rx usage(%4u) *rx_cons_sb(%u) rx pkt(%lu) rx calls(%lu %lu)\n",
- fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
- fp->rx_comp_cons),
- le16_to_cpu(*fp->rx_cons_sb),
- bnx2x_hilo(&qstats->
- total_unicast_packets_received_hi),
- fp->rx_calls, fp->rx_pkt);
- }
-
- for_each_eth_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
- struct bnx2x_fp_txdata *txdata;
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
- struct netdev_queue *txq;
-
- pr_debug("%s: tx pkt(%lu) (Xoff events %u)",
- fp->name,
- bnx2x_hilo(
- &qstats->total_unicast_packets_transmitted_hi),
- qstats->driver_xoff);
-
- for_each_cos_in_tx_queue(fp, cos) {
- txdata = &fp->txdata[cos];
- txq = netdev_get_tx_queue(bp->dev,
- FP_COS_TO_TXQ(fp, cos));
-
- pr_debug("%d: tx avail(%4u) *tx_cons_sb(%u) tx calls (%lu) %s\n",
- cos,
- bnx2x_tx_avail(bp, txdata),
- le16_to_cpu(*txdata->tx_cons_sb),
- txdata->tx_pkt,
- (netif_tx_queue_stopped(txq) ?
- "Xoff" : "Xon")
- );
- }
- }
}
bnx2x_hw_stats_post(bp);
@@ -1434,63 +1309,6 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
bnx2x_stats_comp(bp);
}
-static void bnx2x_func_stats_base_init(struct bnx2x *bp)
-{
- int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
- u32 func_stx;
-
- /* sanity */
- if (!bp->port.pmf || !bp->func_stx) {
- BNX2X_ERR("BUG!\n");
- return;
- }
-
- /* save our func_stx */
- func_stx = bp->func_stx;
-
- for (vn = VN_0; vn < vn_max; vn++) {
- int mb_idx = BP_FW_MB_IDX_VN(bp, vn);
-
- bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
- bnx2x_func_stats_init(bp);
- bnx2x_hw_stats_post(bp);
- bnx2x_stats_comp(bp);
- }
-
- /* restore our func_stx */
- bp->func_stx = func_stx;
-}
-
-static void bnx2x_func_stats_base_update(struct bnx2x *bp)
-{
- struct dmae_command *dmae = &bp->stats_dmae;
- u32 *stats_comp = bnx2x_sp(bp, stats_comp);
-
- /* sanity */
- if (!bp->func_stx) {
- BNX2X_ERR("BUG!\n");
- return;
- }
-
- bp->executer_idx = 0;
- memset(dmae, 0, sizeof(struct dmae_command));
-
- dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
- true, DMAE_COMP_PCI);
- dmae->src_addr_lo = bp->func_stx >> 2;
- dmae->src_addr_hi = 0;
- dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
- dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
- dmae->len = sizeof(struct host_func_stats) >> 2;
- dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
- dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
- dmae->comp_val = DMAE_COMP_VAL;
-
- *stats_comp = 0;
- bnx2x_hw_stats_post(bp);
- bnx2x_stats_comp(bp);
-}
-
/**
* This function will prepare the statistics ramrod data so that
* we will only have to increment the statistics counter and
@@ -1641,6 +1459,10 @@ void bnx2x_stats_init(struct bnx2x *bp)
DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
bp->port.port_stx, bp->func_stx);
+ /* pmf should retrieve port statistics from SP on a non-init load */
+ if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
+ bnx2x_stats_handle(bp, STATS_EVENT_PMF);
+
port = BP_PORT(bp);
/* port stats */
memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
@@ -1662,24 +1484,80 @@ void bnx2x_stats_init(struct bnx2x *bp)
memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
- memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
+ if (bp->stats_init) {
+ memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
+ memset(&fp->eth_q_stats_old, 0,
+ sizeof(fp->eth_q_stats_old));
+ }
}
/* Prepare statistics ramrod data */
bnx2x_prep_fw_stats_req(bp);
memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
- memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
+ if (bp->stats_init) {
+ memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
+ memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
+ memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
+ memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
+ memset(&bp->func_stats, 0, sizeof(bp->func_stats));
+
+ /* Clean SP from previous statistics */
+ if (bp->func_stx) {
+ memset(bnx2x_sp(bp, func_stats), 0,
+ sizeof(struct host_func_stats));
+ bnx2x_func_stats_init(bp);
+ bnx2x_hw_stats_post(bp);
+ bnx2x_stats_comp(bp);
+ }
+ }
bp->stats_state = STATS_STATE_DISABLED;
- if (bp->port.pmf) {
- if (bp->port.port_stx)
- bnx2x_port_stats_base_init(bp);
+ if (bp->port.pmf && bp->port.port_stx)
+ bnx2x_port_stats_base_init(bp);
- if (bp->func_stx)
- bnx2x_func_stats_base_init(bp);
+ /* mark the end of statistics initialization */
+ bp->stats_init = false;
+}
+
+void bnx2x_save_statistics(struct bnx2x *bp)
+{
+ int i;
+ struct net_device_stats *nstats = &bp->dev->stats;
+
+ /* save queue statistics */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+
+ UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
+ UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
+ UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
+ UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
+ UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
+ UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
+ UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
+ UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
+ UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
+ UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
+ UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
+ UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
+ UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
+ UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
+ }
+
+ /* save net_device_stats statistics */
+ bp->net_stats_old.rx_dropped = nstats->rx_dropped;
- } else if (bp->func_stx)
- bnx2x_func_stats_base_update(bp);
+ /* store port firmware statistics */
+ if (bp->port.pmf && IS_MF(bp)) {
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
+ UPDATE_FW_STAT_OLD(mac_filter_discard);
+ UPDATE_FW_STAT_OLD(mf_tag_discard);
+ UPDATE_FW_STAT_OLD(brb_truncate_discard);
+ UPDATE_FW_STAT_OLD(mac_discard);
+ }
}
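
bnx2x_save_statistics() and the UPDATE_QSTAT() macro work as a pair: before the firmware is unloaded the driver snapshots the running totals into the *_old shadows, and after reload, when the firmware counters restart from zero, each update adds the shadow back on top of the fresh value so user-visible totals keep growing. A minimal sketch of that idea, assuming a single 64-bit counter instead of the driver's hi/lo u32 pairs (names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct q_stats { uint64_t bytes_rx; };
struct q_stats_old { uint64_t bytes_rx; };

/* what UPDATE_QSTAT() does: fresh FW counter + preserved shadow */
static void update_qstat(struct q_stats *qs, const struct q_stats_old *old,
			 uint64_t fw_counter)
{
	qs->bytes_rx = old->bytes_rx + fw_counter;
}

/* what bnx2x_save_statistics() does before the FW is unloaded */
static void save_stats(struct q_stats_old *old, const struct q_stats *qs)
{
	old->bytes_rx = qs->bytes_rx;
}

int main(void)
{
	struct q_stats qs = { 0 };
	struct q_stats_old old = { 0 };

	update_qstat(&qs, &old, 1000);	/* first FW instance counted 1000 */
	save_stats(&old, &qs);		/* driver unload: snapshot totals */
	update_qstat(&qs, &old, 250);	/* new FW instance restarts at 0 */
	printf("total rx bytes: %llu\n",
	       (unsigned long long)qs.bytes_rx);	/* 1250, not 250 */
	return 0;
}
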
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 683deb053109..2b46e1eb7fd1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -1,6 +1,6 @@
/* bnx2x_stats.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -199,6 +199,10 @@ struct bnx2x_eth_stats {
u32 pfc_frames_received_lo;
u32 pfc_frames_sent_hi;
u32 pfc_frames_sent_lo;
+
+ /* Recovery */
+ u32 recoverable_error;
+ u32 unrecoverable_error;
};
@@ -260,6 +264,69 @@ struct bnx2x_eth_q_stats {
u32 total_tpa_bytes_lo;
};
+struct bnx2x_eth_stats_old {
+ u32 rx_stat_dot3statsframestoolong_hi;
+ u32 rx_stat_dot3statsframestoolong_lo;
+};
+
+struct bnx2x_eth_q_stats_old {
+ /* Fields to preserve over fw reset */
+ u32 total_unicast_bytes_received_hi;
+ u32 total_unicast_bytes_received_lo;
+ u32 total_broadcast_bytes_received_hi;
+ u32 total_broadcast_bytes_received_lo;
+ u32 total_multicast_bytes_received_hi;
+ u32 total_multicast_bytes_received_lo;
+ u32 total_unicast_bytes_transmitted_hi;
+ u32 total_unicast_bytes_transmitted_lo;
+ u32 total_broadcast_bytes_transmitted_hi;
+ u32 total_broadcast_bytes_transmitted_lo;
+ u32 total_multicast_bytes_transmitted_hi;
+ u32 total_multicast_bytes_transmitted_lo;
+ u32 total_tpa_bytes_hi;
+ u32 total_tpa_bytes_lo;
+
+ /* Fields to preserve the last value of */
+ u32 total_bytes_received_hi;
+ u32 total_bytes_received_lo;
+ u32 total_bytes_transmitted_hi;
+ u32 total_bytes_transmitted_lo;
+ u32 total_unicast_packets_received_hi;
+ u32 total_unicast_packets_received_lo;
+ u32 total_multicast_packets_received_hi;
+ u32 total_multicast_packets_received_lo;
+ u32 total_broadcast_packets_received_hi;
+ u32 total_broadcast_packets_received_lo;
+ u32 total_unicast_packets_transmitted_hi;
+ u32 total_unicast_packets_transmitted_lo;
+ u32 total_multicast_packets_transmitted_hi;
+ u32 total_multicast_packets_transmitted_lo;
+ u32 total_broadcast_packets_transmitted_hi;
+ u32 total_broadcast_packets_transmitted_lo;
+ u32 valid_bytes_received_hi;
+ u32 valid_bytes_received_lo;
+
+ u32 total_tpa_bytes_hi_old;
+ u32 total_tpa_bytes_lo_old;
+
+ u32 driver_xoff_old;
+ u32 rx_err_discard_pkt_old;
+ u32 rx_skb_alloc_failed_old;
+ u32 hw_csum_err_old;
+};
+
+struct bnx2x_net_stats_old {
+ u32 rx_dropped;
+};
+
+struct bnx2x_fw_port_stats_old {
+ u32 mac_filter_discard;
+ u32 mf_tag_discard;
+ u32 brb_truncate_discard;
+ u32 mac_discard;
+};
+
+
/****************************************************************************
* Macros
****************************************************************************/
@@ -344,6 +411,12 @@ struct bnx2x_eth_q_stats {
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
+#define UPDATE_EXTEND_E_TSTAT(s, t) \
+ do { \
+ UPDATE_EXTEND_TSTAT(s, t); \
+ ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
+ } while (0)
+
#define UPDATE_EXTEND_USTAT(s, t) \
do { \
diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
@@ -351,6 +424,12 @@ struct bnx2x_eth_q_stats {
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
+#define UPDATE_EXTEND_E_USTAT(s, t) \
+ do { \
+ UPDATE_EXTEND_USTAT(s, t); \
+ ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
+ } while (0)
+
#define UPDATE_EXTEND_XSTAT(s, t) \
do { \
diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
@@ -358,6 +437,66 @@ struct bnx2x_eth_q_stats {
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
+#define UPDATE_QSTAT(s, t) \
+ do { \
+ qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \
+ qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
+ } while (0)
+
+#define UPDATE_QSTAT_OLD(f) \
+ do { \
+ qstats_old->f = qstats->f; \
+ } while (0)
+
+#define UPDATE_ESTAT_QSTAT_64(s) \
+ do { \
+ ADD_64(estats->s##_hi, qstats->s##_hi, \
+ estats->s##_lo, qstats->s##_lo); \
+ SUB_64(estats->s##_hi, qstats_old->s##_hi_old, \
+ estats->s##_lo, qstats_old->s##_lo_old); \
+ qstats_old->s##_hi_old = qstats->s##_hi; \
+ qstats_old->s##_lo_old = qstats->s##_lo; \
+ } while (0)
+
+#define UPDATE_ESTAT_QSTAT(s) \
+ do { \
+ estats->s += qstats->s; \
+ estats->s -= qstats_old->s##_old; \
+ qstats_old->s##_old = qstats->s; \
+ } while (0)
+
+#define UPDATE_FSTAT_QSTAT(s) \
+ do { \
+ ADD_64(fstats->s##_hi, qstats->s##_hi, \
+ fstats->s##_lo, qstats->s##_lo); \
+ SUB_64(fstats->s##_hi, qstats_old->s##_hi, \
+ fstats->s##_lo, qstats_old->s##_lo); \
+ estats->s##_hi = fstats->s##_hi; \
+ estats->s##_lo = fstats->s##_lo; \
+ qstats_old->s##_hi = qstats->s##_hi; \
+ qstats_old->s##_lo = qstats->s##_lo; \
+ } while (0)
+
+#define UPDATE_FW_STAT(s) \
+ do { \
+ estats->s = le32_to_cpu(tport->s) + fwstats->s; \
+ } while (0)
+
+#define UPDATE_FW_STAT_OLD(f) \
+ do { \
+ fwstats->f = estats->f; \
+ } while (0)
+
+#define UPDATE_ESTAT(s, t) \
+ do { \
+ SUB_64(estats->s##_hi, estats_old->t##_hi, \
+ estats->s##_lo, estats_old->t##_lo); \
+ ADD_64(estats->s##_hi, estats->t##_hi, \
+ estats->s##_lo, estats->t##_lo); \
+ estats_old->t##_hi = estats->t##_hi; \
+ estats_old->t##_lo = estats->t##_lo; \
+ } while (0)
+
/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
do { \
@@ -384,4 +523,10 @@ void bnx2x_stats_init(struct bnx2x *bp);
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+/**
+ * bnx2x_save_statistics - save statistics when unloading.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_save_statistics(struct bnx2x *bp);
#endif /* BNX2X_STATS_H */
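
UPDATE_ESTAT_QSTAT() above folds a per-queue counter into the per-function aggregate without double counting: add the current queue value, subtract the value folded in on the previous pass, then refresh the snapshot. The same pattern in plain C, again using one 64-bit counter rather than the driver's hi/lo pairs (illustrative names only):

#include <stdint.h>
#include <stdio.h>

/* fold the delta of a per-queue counter into an aggregate counter,
 * as UPDATE_ESTAT_QSTAT() does with estats/qstats/qstats_old
 */
static void fold_delta(uint64_t *aggregate, uint64_t queue_now,
		       uint64_t *queue_seen)
{
	*aggregate += queue_now;
	*aggregate -= *queue_seen;	/* drop what was already folded in */
	*queue_seen = queue_now;	/* remember for the next pass */
}

int main(void)
{
	uint64_t estat = 0, qstat_old = 0;

	fold_delta(&estat, 100, &qstat_old);	/* aggregate = 100 */
	fold_delta(&estat, 140, &qstat_old);	/* aggregate = 140, not 240 */
	printf("aggregate: %llu\n", (unsigned long long)estat);
	return 0;
}
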
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 818a573669e6..7b65716b8734 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1,6 +1,6 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2011 Broadcom Corporation
+ * Copyright (c) 2006-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -380,6 +380,8 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
if (cnic_in_use(csk) &&
test_bit(SK_F_CONNECT_START, &csk->flags)) {
+ csk->vlan_id = path_resp->vlan_id;
+
memcpy(csk->ha, path_resp->mac_addr, 6);
if (test_bit(SK_F_IPV6, &csk->flags))
memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
@@ -2521,12 +2523,35 @@ static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
u32 cid;
u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
+ u32 kcqe_op;
int ulp_type;
cid = kwqe->kwqe_info0;
memset(&kcqe, 0, sizeof(kcqe));
- if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
+ if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
+ u32 l5_cid = 0;
+
+ ulp_type = CNIC_ULP_FCOE;
+ if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
+ struct fcoe_kwqe_conn_enable_disable *req;
+
+ req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
+ kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
+ cid = req->context_id;
+ l5_cid = req->conn_id;
+ } else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
+ kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
+ } else {
+ return;
+ }
+ kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
+ kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
+ kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR;
+ kcqe.kcqe_info2 = cid;
+ kcqe.kcqe_info0 = l5_cid;
+
+ } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
ulp_type = CNIC_ULP_ISCSI;
if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
cid = kwqe->kwqe_info1;
@@ -2539,7 +2564,6 @@ static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
- u32 kcqe_op;
ulp_type = CNIC_ULP_L4;
if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
@@ -2686,9 +2710,17 @@ static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
opcode);
break;
}
- if (ret < 0)
+ if (ret < 0) {
netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
opcode);
+
+ /* Possibly bnx2x parity error, send completion
+ * to ulp drivers with error code to speed up
+ * cleanup and reset recovery.
+ */
+ if (ret == -EIO || ret == -EAGAIN)
+ cnic_bnx2x_kwqe_err(dev, kwqe);
+ }
i += work;
}
return 0;
@@ -3901,6 +3933,8 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
if (l4kcqe->status == 0)
set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
+ else if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_NIC_ERROR)
+ set_bit(SK_F_HW_ERR, &csk->flags);
smp_mb__before_clear_bit();
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index 86936f6b6dbc..06ca00266d70 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -1,7 +1,7 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2009 Broadcom Corporation
+ * Copyright (c) 2006-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -69,6 +69,7 @@
#define FCOE_KCQE_COMPLETION_STATUS_ERROR (0x1)
#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3)
+#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5)
/* KCQ (kernel completion queue) response op codes */
#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
@@ -1392,9 +1393,9 @@ struct xstorm_fcoe_extra_ag_context_section {
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7
#endif
u32 snd_nxt;
- u32 tx_wnd;
- u32 __reserved55;
- u32 local_adv_wnd;
+ u32 __xfrqe_bd_addr_lo;
+ u32 __xfrqe_bd_addr_hi;
+ u32 __xfrqe_data1;
#if defined(__BIG_ENDIAN)
u8 __agg_val8_th;
u8 __tx_dest;
@@ -1480,13 +1481,13 @@ struct xstorm_fcoe_extra_ag_context_section {
#endif
u32 __tcp_agg_vars6;
#if defined(__BIG_ENDIAN)
- u16 __agg_misc6;
+ u16 __xfrqe_mng;
u16 __tcp_agg_vars7;
#elif defined(__LITTLE_ENDIAN)
u16 __tcp_agg_vars7;
- u16 __agg_misc6;
+ u16 __xfrqe_mng;
#endif
- u32 __agg_val10;
+ u32 __xfrqe_data0;
u32 __agg_val10_th;
#if defined(__BIG_ENDIAN)
u16 __reserved3;
@@ -1706,11 +1707,11 @@ struct xstorm_fcoe_ag_context {
#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3_SHIFT 24
#if defined(__BIG_ENDIAN)
- u16 agg_misc0;
+ u16 __cache_wqe_db;
u16 sq_prod;
#elif defined(__LITTLE_ENDIAN)
u16 sq_prod;
- u16 agg_misc0;
+ u16 __cache_wqe_db;
#endif
#if defined(__BIG_ENDIAN)
u8 agg_val3;
@@ -3016,8 +3017,8 @@ struct fcoe_tce_tx_wr_rx_rd_const {
#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5
#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6)
#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6
-#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2 (0x1<<7)
-#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2_SHIFT 7
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_COMP_TRNS (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_COMP_TRNS_SHIFT 7
__le16 rsrv3;
__le32 verify_tx_seq;
};
@@ -4298,7 +4299,7 @@ struct xstorm_eth_context_section {
#endif
#if defined(__BIG_ENDIAN)
u16 reserved_vlan_type;
- u16 params;
+ u16 vlan_params;
#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12)
@@ -4306,7 +4307,7 @@ struct xstorm_eth_context_section {
#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
#elif defined(__LITTLE_ENDIAN)
- u16 params;
+ u16 vlan_params;
#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12)
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 1517763d4e55..60deb84d36bd 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.5.8"
-#define CNIC_MODULE_RELDATE "Jan 3, 2012"
+#define CNIC_MODULE_VERSION "2.5.9"
+#define CNIC_MODULE_RELDATE "Feb 8, 2012"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 084904ceaa30..49e7a258da8a 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2623,8 +2623,6 @@ static int __devinit sbmac_probe(struct platform_device *pldev)
*/
dev = alloc_etherdev(sizeof(struct sbmac_softc));
if (!dev) {
- printk(KERN_ERR "%s: unable to allocate etherdev\n",
- dev_name(&pldev->dev));
err = -ENOMEM;
goto out_unmap;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 35c2a202d67a..b0657466041d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2011 Broadcom Corporation.
+ * Copyright (C) 2005-2012 Broadcom Corporation.
*
* Firmware is:
* Derived from proprietary unpublished source code,
@@ -204,6 +204,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define TG3_RAW_IP_ALIGN 2
#define TG3_FW_UPDATE_TIMEOUT_SEC 5
+#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
#define FIRMWARE_TG3 "tigon/tg3.bin"
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
@@ -1453,33 +1454,23 @@ static void tg3_wait_for_event_ack(struct tg3 *tp)
}
/* tp->lock is held. */
-static void tg3_ump_link_report(struct tg3 *tp)
+static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
- u32 reg;
- u32 val;
-
- if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
- return;
-
- tg3_wait_for_event_ack(tp);
-
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
-
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
+ u32 reg, val;
val = 0;
if (!tg3_readphy(tp, MII_BMCR, &reg))
val = reg << 16;
if (!tg3_readphy(tp, MII_BMSR, &reg))
val |= (reg & 0xffff);
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
+ *data++ = val;
val = 0;
if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
val = reg << 16;
if (!tg3_readphy(tp, MII_LPA, &reg))
val |= (reg & 0xffff);
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
+ *data++ = val;
val = 0;
if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
@@ -1488,13 +1479,33 @@ static void tg3_ump_link_report(struct tg3 *tp)
if (!tg3_readphy(tp, MII_STAT1000, &reg))
val |= (reg & 0xffff);
}
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
+ *data++ = val;
if (!tg3_readphy(tp, MII_PHYADDR, &reg))
val = reg << 16;
else
val = 0;
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
+ *data++ = val;
+}
+
+/* tp->lock is held. */
+static void tg3_ump_link_report(struct tg3 *tp)
+{
+ u32 data[4];
+
+ if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
+ return;
+
+ tg3_phy_gather_ump_data(tp, data);
+
+ tg3_wait_for_event_ack(tp);
+
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
tg3_generate_fw_event(tp);
}
@@ -1809,13 +1820,13 @@ static void tg3_adjust_link(struct net_device *dev)
(6 << TX_LENGTHS_IPG_SHIFT) |
(32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
- if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
- (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
+ if (phydev->link != tp->old_link ||
phydev->speed != tp->link_config.active_speed ||
phydev->duplex != tp->link_config.active_duplex ||
oldflowctrl != tp->link_config.active_flowctrl)
linkmesg = 1;
+ tp->old_link = phydev->link;
tp->link_config.active_speed = phydev->speed;
tp->link_config.active_duplex = phydev->duplex;
@@ -1884,10 +1895,10 @@ static void tg3_phy_start(struct tg3 *tp)
if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
- phydev->speed = tp->link_config.orig_speed;
- phydev->duplex = tp->link_config.orig_duplex;
- phydev->autoneg = tp->link_config.orig_autoneg;
- phydev->advertising = tp->link_config.orig_advertising;
+ phydev->speed = tp->link_config.speed;
+ phydev->duplex = tp->link_config.duplex;
+ phydev->autoneg = tp->link_config.autoneg;
+ phydev->advertising = tp->link_config.advertising;
}
phy_start(phydev);
@@ -2709,9 +2720,6 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
return 0;
}
-static int tg3_setup_phy(struct tg3 *, int);
-static int tg3_halt_cpu(struct tg3 *, u32);
-
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
u32 val;
@@ -2978,6 +2986,259 @@ static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
return res;
}
+static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
+ u32 offset, u32 len, u8 *buf)
+{
+ int i, j, rc = 0;
+ u32 val;
+
+ for (i = 0; i < len; i += 4) {
+ u32 addr;
+ __be32 data;
+
+ addr = offset + i;
+
+ memcpy(&data, buf + i, 4);
+
+ /*
+ * The SEEPROM interface expects the data to always be opposite
+ * the native endian format. We accomplish this by reversing
+ * all the operations that would have been performed on the
+ * data from a call to tg3_nvram_read_be32().
+ */
+ tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
+
+ val = tr32(GRC_EEPROM_ADDR);
+ tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
+
+ val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
+ EEPROM_ADDR_READ);
+ tw32(GRC_EEPROM_ADDR, val |
+ (0 << EEPROM_ADDR_DEVID_SHIFT) |
+ (addr & EEPROM_ADDR_ADDR_MASK) |
+ EEPROM_ADDR_START |
+ EEPROM_ADDR_WRITE);
+
+ for (j = 0; j < 1000; j++) {
+ val = tr32(GRC_EEPROM_ADDR);
+
+ if (val & EEPROM_ADDR_COMPLETE)
+ break;
+ msleep(1);
+ }
+ if (!(val & EEPROM_ADDR_COMPLETE)) {
+ rc = -EBUSY;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
+ u8 *buf)
+{
+ int ret = 0;
+ u32 pagesize = tp->nvram_pagesize;
+ u32 pagemask = pagesize - 1;
+ u32 nvram_cmd;
+ u8 *tmp;
+
+ tmp = kmalloc(pagesize, GFP_KERNEL);
+ if (tmp == NULL)
+ return -ENOMEM;
+
+ while (len) {
+ int j;
+ u32 phy_addr, page_off, size;
+
+ phy_addr = offset & ~pagemask;
+
+ for (j = 0; j < pagesize; j += 4) {
+ ret = tg3_nvram_read_be32(tp, phy_addr + j,
+ (__be32 *) (tmp + j));
+ if (ret)
+ break;
+ }
+ if (ret)
+ break;
+
+ page_off = offset & pagemask;
+ size = pagesize;
+ if (len < size)
+ size = len;
+
+ len -= size;
+
+ memcpy(tmp + page_off, buf, size);
+
+ offset = offset + (pagesize - page_off);
+
+ tg3_enable_nvram_access(tp);
+
+ /*
+ * Before we can erase the flash page, we need
+ * to issue a special "write enable" command.
+ */
+ nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+
+ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+ break;
+
+ /* Erase the target page */
+ tw32(NVRAM_ADDR, phy_addr);
+
+ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
+ NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
+
+ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+ break;
+
+ /* Issue another write enable to start the write. */
+ nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+
+ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+ break;
+
+ for (j = 0; j < pagesize; j += 4) {
+ __be32 data;
+
+ data = *((__be32 *) (tmp + j));
+
+ tw32(NVRAM_WRDATA, be32_to_cpu(data));
+
+ tw32(NVRAM_ADDR, phy_addr + j);
+
+ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
+ NVRAM_CMD_WR;
+
+ if (j == 0)
+ nvram_cmd |= NVRAM_CMD_FIRST;
+ else if (j == (pagesize - 4))
+ nvram_cmd |= NVRAM_CMD_LAST;
+
+ ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
+ if (ret)
+ break;
+ }
+ if (ret)
+ break;
+ }
+
+ nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+ tg3_nvram_exec_cmd(tp, nvram_cmd);
+
+ kfree(tmp);
+
+ return ret;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
+ u8 *buf)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < len; i += 4, offset += 4) {
+ u32 page_off, phy_addr, nvram_cmd;
+ __be32 data;
+
+ memcpy(&data, buf + i, 4);
+ tw32(NVRAM_WRDATA, be32_to_cpu(data));
+
+ page_off = offset % tp->nvram_pagesize;
+
+ phy_addr = tg3_nvram_phys_addr(tp, offset);
+
+ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
+
+ if (page_off == 0 || i == 0)
+ nvram_cmd |= NVRAM_CMD_FIRST;
+ if (page_off == (tp->nvram_pagesize - 4))
+ nvram_cmd |= NVRAM_CMD_LAST;
+
+ if (i == (len - 4))
+ nvram_cmd |= NVRAM_CMD_LAST;
+
+ if ((nvram_cmd & NVRAM_CMD_FIRST) ||
+ !tg3_flag(tp, FLASH) ||
+ !tg3_flag(tp, 57765_PLUS))
+ tw32(NVRAM_ADDR, phy_addr);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
+ !tg3_flag(tp, 5755_PLUS) &&
+ (tp->nvram_jedecnum == JEDEC_ST) &&
+ (nvram_cmd & NVRAM_CMD_FIRST)) {
+ u32 cmd;
+
+ cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+ ret = tg3_nvram_exec_cmd(tp, cmd);
+ if (ret)
+ break;
+ }
+ if (!tg3_flag(tp, FLASH)) {
+ /* We always do complete word writes to eeprom. */
+ nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
+ }
+
+ ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
+{
+ int ret;
+
+ if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
+ ~GRC_LCLCTRL_GPIO_OUTPUT1);
+ udelay(40);
+ }
+
+ if (!tg3_flag(tp, NVRAM)) {
+ ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
+ } else {
+ u32 grc_mode;
+
+ ret = tg3_nvram_lock(tp);
+ if (ret)
+ return ret;
+
+ tg3_enable_nvram_access(tp);
+ if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
+ tw32(NVRAM_WRITE1, 0x406);
+
+ grc_mode = tr32(GRC_MODE);
+ tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
+
+ if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
+ ret = tg3_nvram_write_block_buffered(tp, offset, len,
+ buf);
+ } else {
+ ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
+ buf);
+ }
+
+ grc_mode = tr32(GRC_MODE);
+ tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
+
+ tg3_disable_nvram_access(tp);
+ tg3_nvram_unlock(tp);
+ }
+
+ if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+ udelay(40);
+ }
+
+ return ret;
+}
+
#define RX_CPU_SCRATCH_BASE 0x30000
#define RX_CPU_SCRATCH_SIZE 0x04000
#define TX_CPU_SCRATCH_BASE 0x34000
@@ -3264,6 +3525,8 @@ static int tg3_power_up(struct tg3 *tp)
return err;
}
+static int tg3_setup_phy(struct tg3 *, int);
+
static int tg3_power_down_prepare(struct tg3 *tp)
{
u32 misc_host_ctrl;
@@ -3302,10 +3565,10 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
- tp->link_config.orig_speed = phydev->speed;
- tp->link_config.orig_duplex = phydev->duplex;
- tp->link_config.orig_autoneg = phydev->autoneg;
- tp->link_config.orig_advertising = phydev->advertising;
+ tp->link_config.speed = phydev->speed;
+ tp->link_config.duplex = phydev->duplex;
+ tp->link_config.autoneg = phydev->autoneg;
+ tp->link_config.advertising = phydev->advertising;
advertising = ADVERTISED_TP |
ADVERTISED_Pause |
@@ -3338,19 +3601,11 @@ static int tg3_power_down_prepare(struct tg3 *tp)
} else {
do_low_power = true;
- if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
- tp->link_config.orig_speed = tp->link_config.speed;
- tp->link_config.orig_duplex = tp->link_config.duplex;
- tp->link_config.orig_autoneg = tp->link_config.autoneg;
- }
- if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
- tp->link_config.speed = SPEED_10;
- tp->link_config.duplex = DUPLEX_HALF;
- tp->link_config.autoneg = AUTONEG_ENABLE;
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
tg3_setup_phy(tp, 0);
- }
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
@@ -3559,8 +3814,8 @@ static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8
DUPLEX_HALF;
break;
}
- *speed = SPEED_INVALID;
- *duplex = DUPLEX_INVALID;
+ *speed = SPEED_UNKNOWN;
+ *duplex = DUPLEX_UNKNOWN;
break;
}
}
@@ -3640,51 +3895,33 @@ done:
static void tg3_phy_copper_begin(struct tg3 *tp)
{
- u32 new_adv;
- int i;
+ if (tp->link_config.autoneg == AUTONEG_ENABLE ||
+ (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+ u32 adv, fc;
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
- new_adv = ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full;
- if (tg3_flag(tp, WOL_SPEED_100MB))
- new_adv |= ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full;
-
- tg3_phy_autoneg_cfg(tp, new_adv,
- FLOW_CTRL_TX | FLOW_CTRL_RX);
- } else if (tp->link_config.speed == SPEED_INVALID) {
- if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
- tp->link_config.advertising &=
- ~(ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full);
+ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+ adv = ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full;
+ if (tg3_flag(tp, WOL_SPEED_100MB))
+ adv |= ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full;
- tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
- tp->link_config.flowctrl);
- } else {
- /* Asking for a specific link mode. */
- if (tp->link_config.speed == SPEED_1000) {
- if (tp->link_config.duplex == DUPLEX_FULL)
- new_adv = ADVERTISED_1000baseT_Full;
- else
- new_adv = ADVERTISED_1000baseT_Half;
- } else if (tp->link_config.speed == SPEED_100) {
- if (tp->link_config.duplex == DUPLEX_FULL)
- new_adv = ADVERTISED_100baseT_Full;
- else
- new_adv = ADVERTISED_100baseT_Half;
+ fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
} else {
- if (tp->link_config.duplex == DUPLEX_FULL)
- new_adv = ADVERTISED_10baseT_Full;
- else
- new_adv = ADVERTISED_10baseT_Half;
+ adv = tp->link_config.advertising;
+ if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+ adv &= ~(ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+
+ fc = tp->link_config.flowctrl;
}
- tg3_phy_autoneg_cfg(tp, new_adv,
- tp->link_config.flowctrl);
- }
+ tg3_phy_autoneg_cfg(tp, adv, fc);
- if (tp->link_config.autoneg == AUTONEG_DISABLE &&
- tp->link_config.speed != SPEED_INVALID) {
+ tg3_writephy(tp, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+ } else {
+ int i;
u32 bmcr, orig_bmcr;
tp->link_config.active_speed = tp->link_config.speed;
@@ -3726,9 +3963,6 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
tg3_writephy(tp, MII_BMCR, bmcr);
udelay(40);
}
- } else {
- tg3_writephy(tp, MII_BMCR,
- BMCR_ANENABLE | BMCR_ANRESTART);
}
}
@@ -3778,7 +4012,16 @@ static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
return false;
- tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+ if (tgtadv &&
+ (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
+ tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
+ tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
+ CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
+ } else {
+ tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+ }
+
if (tg3_ctrl != tgtadv)
return false;
}
@@ -3909,8 +4152,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
}
current_link_up = 0;
- current_speed = SPEED_INVALID;
- current_duplex = DUPLEX_INVALID;
+ current_speed = SPEED_UNKNOWN;
+ current_duplex = DUPLEX_UNKNOWN;
tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
tp->link_config.rmt_adv = 0;
@@ -4806,8 +5049,8 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
LED_CTRL_LNKLED_OVERRIDE |
LED_CTRL_1000MBPS_ON));
} else {
- tp->link_config.active_speed = SPEED_INVALID;
- tp->link_config.active_duplex = DUPLEX_INVALID;
+ tp->link_config.active_speed = SPEED_UNKNOWN;
+ tp->link_config.active_duplex = DUPLEX_UNKNOWN;
tw32(MAC_LED_CTRL, (tp->led_ctrl |
LED_CTRL_LNKLED_OVERRIDE |
LED_CTRL_TRAFFIC_OVERRIDE));
@@ -4855,8 +5098,8 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
tg3_phy_reset(tp);
current_link_up = 0;
- current_speed = SPEED_INVALID;
- current_duplex = DUPLEX_INVALID;
+ current_speed = SPEED_UNKNOWN;
+ current_duplex = DUPLEX_UNKNOWN;
tp->link_config.rmt_adv = 0;
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
@@ -5685,6 +5928,9 @@ next_pkt_nopost:
/* Refill RX ring(s). */
if (!tg3_flag(tp, ENABLE_RSS)) {
+ /* Sync BD data before updating mailbox */
+ wmb();
+
if (work_mask & RXD_OPAQUE_RING_STD) {
tpr->rx_std_prod_idx = std_prod_idx &
tp->rx_std_ring_mask;
@@ -5921,6 +6167,7 @@ static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
cancel_work_sync(&tp->reset_task);
tg3_flag_clear(tp, RESET_TASK_PENDING);
+ tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
@@ -6292,33 +6539,6 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id)
return IRQ_RETVAL(0);
}
-static int tg3_init_hw(struct tg3 *, int);
-static int tg3_halt(struct tg3 *, int, int);
-
-/* Restart hardware after configuration changes, self-test, etc.
- * Invoked with tp->lock held.
- */
-static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
- __releases(tp->lock)
- __acquires(tp->lock)
-{
- int err;
-
- err = tg3_init_hw(tp, reset_phy);
- if (err) {
- netdev_err(tp->dev,
- "Failed to re-initialize device, aborting\n");
- tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_full_unlock(tp);
- del_timer_sync(&tp->timer);
- tp->irq_sync = 0;
- tg3_napi_enable(tp);
- dev_close(tp->dev);
- tg3_full_lock(tp, 0);
- }
- return err;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
@@ -6330,50 +6550,6 @@ static void tg3_poll_controller(struct net_device *dev)
}
#endif
-static void tg3_reset_task(struct work_struct *work)
-{
- struct tg3 *tp = container_of(work, struct tg3, reset_task);
- int err;
-
- tg3_full_lock(tp, 0);
-
- if (!netif_running(tp->dev)) {
- tg3_flag_clear(tp, RESET_TASK_PENDING);
- tg3_full_unlock(tp);
- return;
- }
-
- tg3_full_unlock(tp);
-
- tg3_phy_stop(tp);
-
- tg3_netif_stop(tp);
-
- tg3_full_lock(tp, 1);
-
- if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
- tp->write32_tx_mbox = tg3_write32_tx_mbox;
- tp->write32_rx_mbox = tg3_write_flush_reg32;
- tg3_flag_set(tp, MBOX_WRITE_REORDER);
- tg3_flag_clear(tp, TX_RECOVERY_PENDING);
- }
-
- tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
- err = tg3_init_hw(tp, 1);
- if (err)
- goto out;
-
- tg3_netif_start(tp);
-
-out:
- tg3_full_unlock(tp);
-
- if (!err)
- tg3_phy_start(tp);
-
- tg3_flag_clear(tp, RESET_TASK_PENDING);
-}
-
static void tg3_tx_timeout(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
@@ -6745,7 +6921,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
mss, vlan)) {
would_hit_hwbug = 1;
- /* Now loop through additional data fragments, and queue them. */
} else if (skb_shinfo(skb)->nr_frags > 0) {
u32 tmp_mss = mss;
@@ -6754,6 +6929,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
!tg3_flag(tp, HW_TSO_3))
tmp_mss = 0;
+ /* Now loop through additional data
+ * fragments, and queue them.
+ */
last = skb_shinfo(skb)->nr_frags - 1;
for (i = 0; i <= last; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -6795,6 +6973,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
netdev_tx_sent_queue(txq, skb->len);
+ /* Sync BD data before updating mailbox */
+ wmb();
+
/* Packets are ready, update Tx producer idx local and on card. */
tw32_tx_mbox(tnapi->prodmbox, entry);
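Note on the three "Sync BD data before updating mailbox" hunks (the RX refill path, tg3_start_xmit() above, and the loopback test further down): each adds a wmb() so the buffer-descriptor writes in host memory are ordered before the mailbox write that tells the NIC new BDs are ready. The fragment below is a generic, compilable sketch of that producer/doorbell pattern, not driver code; bd, ring and mmio_doorbell are stand-ins, and wmb() is modeled here as a compiler barrier only.

#include <stdint.h>

struct bd { uint64_t addr; uint32_t len_flags; };	/* stand-in buffer descriptor */

static struct bd ring[512];
static volatile uint32_t *mmio_doorbell;		/* would be ioremap()ed MMIO  */

#define wmb()	__asm__ __volatile__("" ::: "memory")	/* placeholder barrier only   */

static void post_bd(uint32_t prod, uint64_t dma_addr, uint32_t len_flags)
{
	ring[prod].addr = dma_addr;		/* 1. publish the descriptor           */
	ring[prod].len_flags = len_flags;

	wmb();					/* 2. BD writes must be visible before
						 *    the mailbox/doorbell update      */

	*mmio_doorbell = prod + 1;		/* 3. ring the doorbell (tw32_tx_mbox /
						 *    tw32_rx_mbox in the driver)      */
}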
@@ -6993,66 +7174,6 @@ static int tg3_set_features(struct net_device *dev, netdev_features_t features)
return 0;
}
-static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
- int new_mtu)
-{
- dev->mtu = new_mtu;
-
- if (new_mtu > ETH_DATA_LEN) {
- if (tg3_flag(tp, 5780_CLASS)) {
- netdev_update_features(dev);
- tg3_flag_clear(tp, TSO_CAPABLE);
- } else {
- tg3_flag_set(tp, JUMBO_RING_ENABLE);
- }
- } else {
- if (tg3_flag(tp, 5780_CLASS)) {
- tg3_flag_set(tp, TSO_CAPABLE);
- netdev_update_features(dev);
- }
- tg3_flag_clear(tp, JUMBO_RING_ENABLE);
- }
-}
-
-static int tg3_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct tg3 *tp = netdev_priv(dev);
- int err;
-
- if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
- return -EINVAL;
-
- if (!netif_running(dev)) {
- /* We'll just catch it later when the
- * device is up'd.
- */
- tg3_set_mtu(dev, tp, new_mtu);
- return 0;
- }
-
- tg3_phy_stop(tp);
-
- tg3_netif_stop(tp);
-
- tg3_full_lock(tp, 1);
-
- tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-
- tg3_set_mtu(dev, tp, new_mtu);
-
- err = tg3_restart_hw(tp, 0);
-
- if (!err)
- tg3_netif_start(tp);
-
- tg3_full_unlock(tp);
-
- if (!err)
- tg3_phy_start(tp);
-
- return err;
-}
-
static void tg3_rx_prodring_free(struct tg3 *tp,
struct tg3_rx_prodring_set *tpr)
{
@@ -7908,7 +8029,7 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
if (tp->hw_stats) {
/* Save the stats across chip resets... */
- tg3_get_nstats(tp, &tp->net_stats_prev),
+ tg3_get_nstats(tp, &tp->net_stats_prev);
tg3_get_estats(tp, &tp->estats_prev);
/* And make sure the next sample is new data */
@@ -7928,7 +8049,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
int err = 0, skip_mac_1 = 0;
if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
@@ -7976,7 +8097,6 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
nic_addr);
}
-static void __tg3_set_rx_mode(struct net_device *);
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
int i;
@@ -8213,6 +8333,93 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
+static inline u32 calc_crc(unsigned char *buf, int len)
+{
+ u32 reg;
+ u32 tmp;
+ int j, k;
+
+ reg = 0xffffffff;
+
+ for (j = 0; j < len; j++) {
+ reg ^= buf[j];
+
+ for (k = 0; k < 8; k++) {
+ tmp = reg & 0x01;
+
+ reg >>= 1;
+
+ if (tmp)
+ reg ^= 0xedb88320;
+ }
+ }
+
+ return ~reg;
+}
+
+static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
+{
+ /* accept or reject all multicast frames */
+ tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
+ tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
+ tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
+ tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
+}
+
+static void __tg3_set_rx_mode(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ u32 rx_mode;
+
+ rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
+ RX_MODE_KEEP_VLAN_TAG);
+
+#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
+ /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
+ * flag clear.
+ */
+ if (!tg3_flag(tp, ENABLE_ASF))
+ rx_mode |= RX_MODE_KEEP_VLAN_TAG;
+#endif
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Promiscuous mode. */
+ rx_mode |= RX_MODE_PROMISC;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ /* Accept all multicast. */
+ tg3_set_multi(tp, 1);
+ } else if (netdev_mc_empty(dev)) {
+ /* Reject all multicast. */
+ tg3_set_multi(tp, 0);
+ } else {
+ /* Accept one or more multicast(s). */
+ struct netdev_hw_addr *ha;
+ u32 mc_filter[4] = { 0, };
+ u32 regidx;
+ u32 bit;
+ u32 crc;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = calc_crc(ha->addr, ETH_ALEN);
+ bit = ~crc & 0x7f;
+ regidx = (bit & 0x60) >> 5;
+ bit &= 0x1f;
+ mc_filter[regidx] |= (1 << bit);
+ }
+
+ tw32(MAC_HASH_REG_0, mc_filter[0]);
+ tw32(MAC_HASH_REG_1, mc_filter[1]);
+ tw32(MAC_HASH_REG_2, mc_filter[2]);
+ tw32(MAC_HASH_REG_3, mc_filter[3]);
+ }
+
+ if (rx_mode != tp->rx_mode) {
+ tp->rx_mode = rx_mode;
+ tw32_f(MAC_RX_MODE, rx_mode);
+ udelay(10);
+ }
+}
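The calc_crc()/tg3_set_multi()/__tg3_set_rx_mode() block above is a straight move of code that is deleted lower down; its logic does not change. For readers new to the MAC hash filter, the standalone user-space sketch below (an illustration, not driver code) mirrors that logic: seven bits derived from the inverted CRC-32 of the address select one bit out of the four 32-bit MAC_HASH registers. The 01:00:5e:00:00:01 example address (IPv4 all-hosts multicast) is only for demonstration.

#include <stdio.h>
#include <stdint.h>

static uint32_t calc_crc(const unsigned char *buf, int len)
{
	uint32_t reg = 0xffffffff;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];
		for (k = 0; k < 8; k++) {
			uint32_t lsb = reg & 0x01;

			reg >>= 1;
			if (lsb)
				reg ^= 0xedb88320;	/* CRC-32 (reflected) polynomial */
		}
	}
	return ~reg;
}

int main(void)
{
	unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t mc_filter[4] = { 0, 0, 0, 0 };
	uint32_t crc, bit, regidx;

	crc = calc_crc(mc, 6);
	bit = ~crc & 0x7f;			/* 7 bits select 1 of 128 filter bits */
	regidx = (bit & 0x60) >> 5;		/* which of MAC_HASH_REG_0..3         */
	bit &= 0x1f;				/* bit position within that register  */
	mc_filter[regidx] |= 1u << bit;

	printf("address hashes to MAC_HASH_REG_%u, bit %u (word 0x%08x)\n",
	       regidx, bit, mc_filter[regidx]);
	return 0;
}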
+
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
{
int i;
@@ -8688,9 +8895,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, PCI_EXPRESS))
rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
- rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
-
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3))
@@ -9037,12 +9241,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
}
if (!tg3_flag(tp, USE_PHYLIB)) {
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
- tp->link_config.speed = tp->link_config.orig_speed;
- tp->link_config.duplex = tp->link_config.orig_duplex;
- tp->link_config.autoneg = tp->link_config.orig_autoneg;
- }
err = tg3_setup_phy(tp, 0);
if (err)
@@ -9345,6 +9545,108 @@ restart_timer:
add_timer(&tp->timer);
}
+static void __devinit tg3_timer_init(struct tg3 *tp)
+{
+ if (tg3_flag(tp, TAGGED_STATUS) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ !tg3_flag(tp, 57765_CLASS))
+ tp->timer_offset = HZ;
+ else
+ tp->timer_offset = HZ / 10;
+
+ BUG_ON(tp->timer_offset > HZ);
+
+ tp->timer_multiplier = (HZ / tp->timer_offset);
+ tp->asf_multiplier = (HZ / tp->timer_offset) *
+ TG3_FW_UPDATE_FREQ_SEC;
+
+ init_timer(&tp->timer);
+ tp->timer.data = (unsigned long) tp;
+ tp->timer.function = tg3_timer;
+}
+
+static void tg3_timer_start(struct tg3 *tp)
+{
+ tp->asf_counter = tp->asf_multiplier;
+ tp->timer_counter = tp->timer_multiplier;
+
+ tp->timer.expires = jiffies + tp->timer_offset;
+ add_timer(&tp->timer);
+}
+
+static void tg3_timer_stop(struct tg3 *tp)
+{
+ del_timer_sync(&tp->timer);
+}
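tg3_timer_init()/tg3_timer_start()/tg3_timer_stop() above consolidate timer handling that tg3_open(), suspend/resume and the PCI error handlers previously open-coded; later hunks switch those call sites over. The multiplier arithmetic is easy to misread, so here is a user-space sketch of it; HZ=1000 and TG3_FW_UPDATE_FREQ_SEC=2 are assumptions for illustration (the old open() code hard-coded the ASF factor as "* 2").

#include <stdio.h>

int main(void)
{
	const int HZ = 1000;			/* assumed tick rate          */
	const int TG3_FW_UPDATE_FREQ_SEC = 2;	/* assumed, matches old "* 2" */
	int tagged_status = 0;			/* the non-tagged-status case */

	int timer_offset = tagged_status ? HZ : HZ / 10;
	int timer_multiplier = HZ / timer_offset;
	int asf_multiplier = (HZ / timer_offset) * TG3_FW_UPDATE_FREQ_SEC;

	/* The timer re-arms every timer_offset jiffies; the counters count
	 * down from these multipliers, so the slow-path work runs once per
	 * second and the ASF heartbeat once per TG3_FW_UPDATE_FREQ_SEC
	 * seconds regardless of the chosen timer_offset.
	 */
	printf("timer every %d ms, slow work every %d ticks, ASF every %d ticks\n",
	       1000 * timer_offset / HZ, timer_multiplier, asf_multiplier);
	return 0;
}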
+
+/* Restart hardware after configuration changes, self-test, etc.
+ * Invoked with tp->lock held.
+ */
+static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
+ __releases(tp->lock)
+ __acquires(tp->lock)
+{
+ int err;
+
+ err = tg3_init_hw(tp, reset_phy);
+ if (err) {
+ netdev_err(tp->dev,
+ "Failed to re-initialize device, aborting\n");
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_full_unlock(tp);
+ tg3_timer_stop(tp);
+ tp->irq_sync = 0;
+ tg3_napi_enable(tp);
+ dev_close(tp->dev);
+ tg3_full_lock(tp, 0);
+ }
+ return err;
+}
+
+static void tg3_reset_task(struct work_struct *work)
+{
+ struct tg3 *tp = container_of(work, struct tg3, reset_task);
+ int err;
+
+ tg3_full_lock(tp, 0);
+
+ if (!netif_running(tp->dev)) {
+ tg3_flag_clear(tp, RESET_TASK_PENDING);
+ tg3_full_unlock(tp);
+ return;
+ }
+
+ tg3_full_unlock(tp);
+
+ tg3_phy_stop(tp);
+
+ tg3_netif_stop(tp);
+
+ tg3_full_lock(tp, 1);
+
+ if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
+ tp->write32_tx_mbox = tg3_write32_tx_mbox;
+ tp->write32_rx_mbox = tg3_write_flush_reg32;
+ tg3_flag_set(tp, MBOX_WRITE_REORDER);
+ tg3_flag_clear(tp, TX_RECOVERY_PENDING);
+ }
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
+ err = tg3_init_hw(tp, 1);
+ if (err)
+ goto out;
+
+ tg3_netif_start(tp);
+
+out:
+ tg3_full_unlock(tp);
+
+ if (!err)
+ tg3_phy_start(tp);
+
+ tg3_flag_clear(tp, RESET_TASK_PENDING);
+}
+
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
irq_handler_t fn;
@@ -9399,7 +9701,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
}
err = request_irq(tnapi->irq_vec, tg3_test_isr,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
+ IRQF_SHARED, dev->name, tnapi);
if (err)
return err;
@@ -9710,24 +10012,6 @@ static int tg3_open(struct net_device *dev)
if (err) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_free_rings(tp);
- } else {
- if (tg3_flag(tp, TAGGED_STATUS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- !tg3_flag(tp, 57765_CLASS))
- tp->timer_offset = HZ;
- else
- tp->timer_offset = HZ / 10;
-
- BUG_ON(tp->timer_offset > HZ);
- tp->timer_counter = tp->timer_multiplier =
- (HZ / tp->timer_offset);
- tp->asf_counter = tp->asf_multiplier =
- ((HZ / tp->timer_offset) * 2);
-
- init_timer(&tp->timer);
- tp->timer.expires = jiffies + tp->timer_offset;
- tp->timer.data = (unsigned long) tp;
- tp->timer.function = tg3_timer;
}
tg3_full_unlock(tp);
@@ -9759,7 +10043,7 @@ static int tg3_open(struct net_device *dev)
tg3_full_lock(tp, 0);
- add_timer(&tp->timer);
+ tg3_timer_start(tp);
tg3_flag_set(tp, INIT_COMPLETE);
tg3_enable_ints(tp);
@@ -9804,7 +10088,7 @@ static int tg3_close(struct net_device *dev)
netif_tx_stop_all_queues(dev);
- del_timer_sync(&tp->timer);
+ tg3_timer_stop(tp);
tg3_phy_stop(tp);
@@ -9878,9 +10162,6 @@ static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
struct tg3_hw_stats *hw_stats = tp->hw_stats;
- if (!hw_stats)
- return;
-
ESTAT_ADD(rx_octets);
ESTAT_ADD(rx_fragments);
ESTAT_ADD(rx_ucast_packets);
@@ -10016,105 +10297,6 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
stats->tx_dropped = tp->tx_dropped;
}
-static inline u32 calc_crc(unsigned char *buf, int len)
-{
- u32 reg;
- u32 tmp;
- int j, k;
-
- reg = 0xffffffff;
-
- for (j = 0; j < len; j++) {
- reg ^= buf[j];
-
- for (k = 0; k < 8; k++) {
- tmp = reg & 0x01;
-
- reg >>= 1;
-
- if (tmp)
- reg ^= 0xedb88320;
- }
- }
-
- return ~reg;
-}
-
-static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
-{
- /* accept or reject all multicast frames */
- tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
- tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
- tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
- tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
-}
-
-static void __tg3_set_rx_mode(struct net_device *dev)
-{
- struct tg3 *tp = netdev_priv(dev);
- u32 rx_mode;
-
- rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
- RX_MODE_KEEP_VLAN_TAG);
-
-#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
- /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
- * flag clear.
- */
- if (!tg3_flag(tp, ENABLE_ASF))
- rx_mode |= RX_MODE_KEEP_VLAN_TAG;
-#endif
-
- if (dev->flags & IFF_PROMISC) {
- /* Promiscuous mode. */
- rx_mode |= RX_MODE_PROMISC;
- } else if (dev->flags & IFF_ALLMULTI) {
- /* Accept all multicast. */
- tg3_set_multi(tp, 1);
- } else if (netdev_mc_empty(dev)) {
- /* Reject all multicast. */
- tg3_set_multi(tp, 0);
- } else {
- /* Accept one or more multicast(s). */
- struct netdev_hw_addr *ha;
- u32 mc_filter[4] = { 0, };
- u32 regidx;
- u32 bit;
- u32 crc;
-
- netdev_for_each_mc_addr(ha, dev) {
- crc = calc_crc(ha->addr, ETH_ALEN);
- bit = ~crc & 0x7f;
- regidx = (bit & 0x60) >> 5;
- bit &= 0x1f;
- mc_filter[regidx] |= (1 << bit);
- }
-
- tw32(MAC_HASH_REG_0, mc_filter[0]);
- tw32(MAC_HASH_REG_1, mc_filter[1]);
- tw32(MAC_HASH_REG_2, mc_filter[2]);
- tw32(MAC_HASH_REG_3, mc_filter[3]);
- }
-
- if (rx_mode != tp->rx_mode) {
- tp->rx_mode = rx_mode;
- tw32_f(MAC_RX_MODE, rx_mode);
- udelay(10);
- }
-}
-
-static void tg3_set_rx_mode(struct net_device *dev)
-{
- struct tg3 *tp = netdev_priv(dev);
-
- if (!netif_running(dev))
- return;
-
- tg3_full_lock(tp, 0);
- __tg3_set_rx_mode(dev);
- tg3_full_unlock(tp);
-}
-
static int tg3_get_regs_len(struct net_device *dev)
{
return TG3_REG_BLK_SIZE;
@@ -10209,8 +10391,6 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
return 0;
}
-static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
-
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
struct tg3 *tp = netdev_priv(dev);
@@ -10324,8 +10504,8 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->eth_tp_mdix = ETH_TP_MDI;
}
} else {
- ethtool_cmd_speed_set(cmd, SPEED_INVALID);
- cmd->duplex = DUPLEX_INVALID;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
}
cmd->phy_address = tp->phy_addr;
@@ -10407,18 +10587,14 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (cmd->autoneg == AUTONEG_ENABLE) {
tp->link_config.advertising = (cmd->advertising |
ADVERTISED_Autoneg);
- tp->link_config.speed = SPEED_INVALID;
- tp->link_config.duplex = DUPLEX_INVALID;
+ tp->link_config.speed = SPEED_UNKNOWN;
+ tp->link_config.duplex = DUPLEX_UNKNOWN;
} else {
tp->link_config.advertising = 0;
tp->link_config.speed = speed;
tp->link_config.duplex = cmd->duplex;
}
- tp->link_config.orig_speed = tp->link_config.speed;
- tp->link_config.orig_duplex = tp->link_config.duplex;
- tp->link_config.orig_autoneg = tp->link_config.autoneg;
-
if (netif_running(dev))
tg3_setup_phy(tp, 1);
@@ -10665,10 +10841,10 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
if (!epause->autoneg)
tg3_setup_flow_control(tp, 0, 0);
} else {
- tp->link_config.orig_advertising &=
+ tp->link_config.advertising &=
~(ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
- tp->link_config.orig_advertising |= newadv;
+ tp->link_config.advertising |= newadv;
}
} else {
int irq_sync = 0;
@@ -10845,7 +11021,10 @@ static void tg3_get_ethtool_stats(struct net_device *dev,
{
struct tg3 *tp = netdev_priv(dev);
- tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
+ if (tp->hw_stats)
+ tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
+ else
+ memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
@@ -11554,6 +11733,10 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
} else {
num_pkts = 1;
data_off = ETH_HLEN;
+
+ if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
+ tx_len > VLAN_ETH_FRAME_LEN)
+ base_flags |= TXD_FLAG_JMB_PKT;
}
for (i = data_off; i < tx_len; i++)
@@ -11586,6 +11769,9 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
tnapi->tx_prod++;
+ /* Sync BD data before updating mailbox */
+ wmb();
+
tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
tr32_mailbox(tnapi->prodmbox);
@@ -11684,6 +11870,10 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
int err = -EIO;
u32 eee_cap;
+ u32 jmb_pkt_sz = 9000;
+
+ if (tp->dma_limit)
+ jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
@@ -11727,7 +11917,7 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
data[0] |= TG3_STD_LOOPBACK_FAILED;
if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
- tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
+ tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
data[0] |= TG3_JMB_LOOPBACK_FAILED;
tg3_mac_loopback(tp, false);
@@ -11752,7 +11942,7 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
tg3_run_loopback(tp, ETH_FRAME_LEN, true))
data[1] |= TG3_TSO_LOOPBACK_FAILED;
if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
- tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
+ tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
data[1] |= TG3_JMB_LOOPBACK_FAILED;
if (do_extlpbk) {
@@ -11770,7 +11960,7 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
tg3_run_loopback(tp, ETH_FRAME_LEN, true))
data[2] |= TG3_TSO_LOOPBACK_FAILED;
if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
- tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
+ tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
data[2] |= TG3_JMB_LOOPBACK_FAILED;
}
@@ -12026,6 +12216,117 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.set_rxfh_indir = tg3_set_rxfh_indir,
};
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (!tp->hw_stats)
+ return &tp->net_stats_prev;
+
+ spin_lock_bh(&tp->lock);
+ tg3_get_nstats(tp, stats);
+ spin_unlock_bh(&tp->lock);
+
+ return stats;
+}
+
+static void tg3_set_rx_mode(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return;
+
+ tg3_full_lock(tp, 0);
+ __tg3_set_rx_mode(dev);
+ tg3_full_unlock(tp);
+}
+
+static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
+ int new_mtu)
+{
+ dev->mtu = new_mtu;
+
+ if (new_mtu > ETH_DATA_LEN) {
+ if (tg3_flag(tp, 5780_CLASS)) {
+ netdev_update_features(dev);
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ } else {
+ tg3_flag_set(tp, JUMBO_RING_ENABLE);
+ }
+ } else {
+ if (tg3_flag(tp, 5780_CLASS)) {
+ tg3_flag_set(tp, TSO_CAPABLE);
+ netdev_update_features(dev);
+ }
+ tg3_flag_clear(tp, JUMBO_RING_ENABLE);
+ }
+}
+
+static int tg3_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int err, reset_phy = 0;
+
+ if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
+ return -EINVAL;
+
+ if (!netif_running(dev)) {
+ /* We'll just catch it later when the
+ * device is up'd.
+ */
+ tg3_set_mtu(dev, tp, new_mtu);
+ return 0;
+ }
+
+ tg3_phy_stop(tp);
+
+ tg3_netif_stop(tp);
+
+ tg3_full_lock(tp, 1);
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+
+ tg3_set_mtu(dev, tp, new_mtu);
+
+ /* Reset PHY, otherwise the read DMA engine will be in a mode that
+ * breaks all requests to 256 bytes.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ reset_phy = 1;
+
+ err = tg3_restart_hw(tp, reset_phy);
+
+ if (!err)
+ tg3_netif_start(tp);
+
+ tg3_full_unlock(tp);
+
+ if (!err)
+ tg3_phy_start(tp);
+
+ return err;
+}
+
+static const struct net_device_ops tg3_netdev_ops = {
+ .ndo_open = tg3_open,
+ .ndo_stop = tg3_close,
+ .ndo_start_xmit = tg3_start_xmit,
+ .ndo_get_stats64 = tg3_get_stats64,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = tg3_set_rx_mode,
+ .ndo_set_mac_address = tg3_set_mac_addr,
+ .ndo_do_ioctl = tg3_ioctl,
+ .ndo_tx_timeout = tg3_tx_timeout,
+ .ndo_change_mtu = tg3_change_mtu,
+ .ndo_fix_features = tg3_fix_features,
+ .ndo_set_features = tg3_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = tg3_poll_controller,
+#endif
+};
+
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
u32 cursize, val, magic;
@@ -12717,254 +13018,6 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
}
}
-static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
- u32 offset, u32 len, u8 *buf)
-{
- int i, j, rc = 0;
- u32 val;
-
- for (i = 0; i < len; i += 4) {
- u32 addr;
- __be32 data;
-
- addr = offset + i;
-
- memcpy(&data, buf + i, 4);
-
- /*
- * The SEEPROM interface expects the data to always be opposite
- * the native endian format. We accomplish this by reversing
- * all the operations that would have been performed on the
- * data from a call to tg3_nvram_read_be32().
- */
- tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
-
- val = tr32(GRC_EEPROM_ADDR);
- tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
-
- val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
- EEPROM_ADDR_READ);
- tw32(GRC_EEPROM_ADDR, val |
- (0 << EEPROM_ADDR_DEVID_SHIFT) |
- (addr & EEPROM_ADDR_ADDR_MASK) |
- EEPROM_ADDR_START |
- EEPROM_ADDR_WRITE);
-
- for (j = 0; j < 1000; j++) {
- val = tr32(GRC_EEPROM_ADDR);
-
- if (val & EEPROM_ADDR_COMPLETE)
- break;
- msleep(1);
- }
- if (!(val & EEPROM_ADDR_COMPLETE)) {
- rc = -EBUSY;
- break;
- }
- }
-
- return rc;
-}
-
-/* offset and length are dword aligned */
-static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
- u8 *buf)
-{
- int ret = 0;
- u32 pagesize = tp->nvram_pagesize;
- u32 pagemask = pagesize - 1;
- u32 nvram_cmd;
- u8 *tmp;
-
- tmp = kmalloc(pagesize, GFP_KERNEL);
- if (tmp == NULL)
- return -ENOMEM;
-
- while (len) {
- int j;
- u32 phy_addr, page_off, size;
-
- phy_addr = offset & ~pagemask;
-
- for (j = 0; j < pagesize; j += 4) {
- ret = tg3_nvram_read_be32(tp, phy_addr + j,
- (__be32 *) (tmp + j));
- if (ret)
- break;
- }
- if (ret)
- break;
-
- page_off = offset & pagemask;
- size = pagesize;
- if (len < size)
- size = len;
-
- len -= size;
-
- memcpy(tmp + page_off, buf, size);
-
- offset = offset + (pagesize - page_off);
-
- tg3_enable_nvram_access(tp);
-
- /*
- * Before we can erase the flash page, we need
- * to issue a special "write enable" command.
- */
- nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
-
- if (tg3_nvram_exec_cmd(tp, nvram_cmd))
- break;
-
- /* Erase the target page */
- tw32(NVRAM_ADDR, phy_addr);
-
- nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
- NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
-
- if (tg3_nvram_exec_cmd(tp, nvram_cmd))
- break;
-
- /* Issue another write enable to start the write. */
- nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
-
- if (tg3_nvram_exec_cmd(tp, nvram_cmd))
- break;
-
- for (j = 0; j < pagesize; j += 4) {
- __be32 data;
-
- data = *((__be32 *) (tmp + j));
-
- tw32(NVRAM_WRDATA, be32_to_cpu(data));
-
- tw32(NVRAM_ADDR, phy_addr + j);
-
- nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
- NVRAM_CMD_WR;
-
- if (j == 0)
- nvram_cmd |= NVRAM_CMD_FIRST;
- else if (j == (pagesize - 4))
- nvram_cmd |= NVRAM_CMD_LAST;
-
- if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
- break;
- }
- if (ret)
- break;
- }
-
- nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
- tg3_nvram_exec_cmd(tp, nvram_cmd);
-
- kfree(tmp);
-
- return ret;
-}
-
-/* offset and length are dword aligned */
-static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
- u8 *buf)
-{
- int i, ret = 0;
-
- for (i = 0; i < len; i += 4, offset += 4) {
- u32 page_off, phy_addr, nvram_cmd;
- __be32 data;
-
- memcpy(&data, buf + i, 4);
- tw32(NVRAM_WRDATA, be32_to_cpu(data));
-
- page_off = offset % tp->nvram_pagesize;
-
- phy_addr = tg3_nvram_phys_addr(tp, offset);
-
- tw32(NVRAM_ADDR, phy_addr);
-
- nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
-
- if (page_off == 0 || i == 0)
- nvram_cmd |= NVRAM_CMD_FIRST;
- if (page_off == (tp->nvram_pagesize - 4))
- nvram_cmd |= NVRAM_CMD_LAST;
-
- if (i == (len - 4))
- nvram_cmd |= NVRAM_CMD_LAST;
-
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
- !tg3_flag(tp, 5755_PLUS) &&
- (tp->nvram_jedecnum == JEDEC_ST) &&
- (nvram_cmd & NVRAM_CMD_FIRST)) {
-
- if ((ret = tg3_nvram_exec_cmd(tp,
- NVRAM_CMD_WREN | NVRAM_CMD_GO |
- NVRAM_CMD_DONE)))
-
- break;
- }
- if (!tg3_flag(tp, FLASH)) {
- /* We always do complete word writes to eeprom. */
- nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
- }
-
- if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
- break;
- }
- return ret;
-}
-
-/* offset and length are dword aligned */
-static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
-{
- int ret;
-
- if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
- tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
- ~GRC_LCLCTRL_GPIO_OUTPUT1);
- udelay(40);
- }
-
- if (!tg3_flag(tp, NVRAM)) {
- ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
- } else {
- u32 grc_mode;
-
- ret = tg3_nvram_lock(tp);
- if (ret)
- return ret;
-
- tg3_enable_nvram_access(tp);
- if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
- tw32(NVRAM_WRITE1, 0x406);
-
- grc_mode = tr32(GRC_MODE);
- tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
-
- if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
- ret = tg3_nvram_write_block_buffered(tp, offset, len,
- buf);
- } else {
- ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
- buf);
- }
-
- grc_mode = tr32(GRC_MODE);
- tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
-
- tg3_disable_nvram_access(tp);
- tg3_nvram_unlock(tp);
- }
-
- if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
- tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
- udelay(40);
- }
-
- return ret;
-}
-
struct subsys_tbl_ent {
u16 subsys_vendor, subsys_devid;
u32 phy_id;
@@ -13315,14 +13368,13 @@ static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
adv |= ADVERTISED_FIBRE;
tp->link_config.advertising = adv;
- tp->link_config.speed = SPEED_INVALID;
- tp->link_config.duplex = DUPLEX_INVALID;
+ tp->link_config.speed = SPEED_UNKNOWN;
+ tp->link_config.duplex = DUPLEX_UNKNOWN;
tp->link_config.autoneg = AUTONEG_ENABLE;
- tp->link_config.active_speed = SPEED_INVALID;
- tp->link_config.active_duplex = DUPLEX_INVALID;
- tp->link_config.orig_speed = SPEED_INVALID;
- tp->link_config.orig_duplex = DUPLEX_INVALID;
- tp->link_config.orig_autoneg = AUTONEG_INVALID;
+ tp->link_config.active_speed = SPEED_UNKNOWN;
+ tp->link_config.active_duplex = DUPLEX_UNKNOWN;
+
+ tp->old_link = -1;
}
static int __devinit tg3_phy_probe(struct tg3 *tp)
@@ -13819,8 +13871,6 @@ done:
tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
-static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
-
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
if (tg3_flag(tp, LRG_PROD_RING_CAP))
@@ -13838,49 +13888,50 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
{ },
};
-static int __devinit tg3_get_invariants(struct tg3 *tp)
+static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
- u32 misc_ctrl_reg;
- u32 pci_state_reg, grc_misc_cfg;
- u32 val;
- u16 pci_cmd;
- int err;
+ struct pci_dev *peer;
+ unsigned int func, devnr = tp->pdev->devfn & ~7;
- /* Force memory write invalidate off. If we leave it on,
- * then on 5700_BX chips we have to enable a workaround.
- * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
- * to match the cacheline size. The Broadcom driver have this
- * workaround but turns MWI off all the times so never uses
- * it. This seems to suggest that the workaround is insufficient.
+ for (func = 0; func < 8; func++) {
+ peer = pci_get_slot(tp->pdev->bus, devnr | func);
+ if (peer && peer != tp->pdev)
+ break;
+ pci_dev_put(peer);
+ }
+ /* 5704 can be configured in single-port mode, set peer to
+ * tp->pdev in that case.
*/
- pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
- pci_cmd &= ~PCI_COMMAND_INVALIDATE;
- pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+ if (!peer) {
+ peer = tp->pdev;
+ return peer;
+ }
- /* Important! -- Make sure register accesses are byteswapped
- * correctly. Also, for those chips that require it, make
- * sure that indirect register accesses are enabled before
- * the first operation.
+ /*
+ * We don't need to keep the refcount elevated; there's no way
+ * to remove one half of this device without removing the other
*/
- pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
- &misc_ctrl_reg);
- tp->misc_host_ctrl |= (misc_ctrl_reg &
- MISC_HOST_CTRL_CHIPREV);
- pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
- tp->misc_host_ctrl);
+ pci_dev_put(peer);
- tp->pci_chip_rev_id = (misc_ctrl_reg >>
- MISC_HOST_CTRL_CHIPREV_SHIFT);
+ return peer;
+}
+
+static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
+{
+ tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
- u32 prod_id_asic_rev;
+ u32 reg;
+
+ /* All devices that use the alternate
+ * ASIC REV location have a CPMU.
+ */
+ tg3_flag_set(tp, CPMU_PRESENT);
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
- pci_read_config_dword(tp->pdev,
- TG3PCI_GEN2_PRODID_ASICREV,
- &prod_id_asic_rev);
+ reg = TG3PCI_GEN2_PRODID_ASICREV;
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
@@ -13891,14 +13942,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
- pci_read_config_dword(tp->pdev,
- TG3PCI_GEN15_PRODID_ASICREV,
- &prod_id_asic_rev);
+ reg = TG3PCI_GEN15_PRODID_ASICREV;
else
- pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
- &prod_id_asic_rev);
+ reg = TG3PCI_PRODID_ASICREV;
- tp->pci_chip_rev_id = prod_id_asic_rev;
+ pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
}
/* Wrong chip ID in 5752 A0. This code can be removed later
@@ -13907,6 +13955,77 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ tg3_flag_set(tp, 5717_PLUS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ tg3_flag_set(tp, 57765_CLASS);
+
+ if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
+ tg3_flag_set(tp, 57765_PLUS);
+
+ /* Intentionally exclude ASIC_REV_5906 */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ tg3_flag(tp, 57765_PLUS))
+ tg3_flag_set(tp, 5755_PLUS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
+ tg3_flag_set(tp, 5780_CLASS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
+ tg3_flag(tp, 5755_PLUS) ||
+ tg3_flag(tp, 5780_CLASS))
+ tg3_flag_set(tp, 5750_PLUS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
+ tg3_flag(tp, 5750_PLUS))
+ tg3_flag_set(tp, 5705_PLUS);
+}
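tg3_detect_asic_rev() pulls the chip-revision decoding and the cumulative device-class flags out of tg3_get_invariants() (the equivalent lines are removed from it below). The flags form a hierarchy: 5717_PLUS and 57765_CLASS imply 57765_PLUS, which implies 5755_PLUS, then 5750_PLUS, then 5705_PLUS, with each older class also set directly for its own ASIC list. The sketch below illustrates only that implication chain with stand-in bit names; it is not the driver's flag machinery.

#include <stdio.h>

enum {
	F_5705_PLUS	= 1 << 0,
	F_5750_PLUS	= 1 << 1,
	F_5755_PLUS	= 1 << 2,
	F_57765_PLUS	= 1 << 3,
	F_57765_CLASS	= 1 << 4,
	F_5717_PLUS	= 1 << 5,
};

/* Implication chain only; the real function additionally sets each class
 * directly for its own list of ASIC revisions (5755, 5780-class, etc.).
 */
static unsigned int classify(int is_5717_family, int is_57765_family)
{
	unsigned int f = 0;

	if (is_5717_family)			/* 5717/5719/5720 */
		f |= F_5717_PLUS;
	if (is_57765_family)			/* 57765/57766    */
		f |= F_57765_CLASS;
	if (f & (F_5717_PLUS | F_57765_CLASS))
		f |= F_57765_PLUS;
	if (f & F_57765_PLUS)
		f |= F_5755_PLUS;
	if (f & F_5755_PLUS)
		f |= F_5750_PLUS;
	if (f & F_5750_PLUS)
		f |= F_5705_PLUS;
	return f;
}

int main(void)
{
	printf("5719:  0x%02x\n", classify(1, 0));
	printf("57766: 0x%02x\n", classify(0, 1));
	return 0;
}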
+
+static int __devinit tg3_get_invariants(struct tg3 *tp)
+{
+ u32 misc_ctrl_reg;
+ u32 pci_state_reg, grc_misc_cfg;
+ u32 val;
+ u16 pci_cmd;
+ int err;
+
+ /* Force memory write invalidate off. If we leave it on,
+ * then on 5700_BX chips we have to enable a workaround.
+ * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
+ * to match the cacheline size. The Broadcom driver have this
+ * workaround but turns MWI off all the times so never uses
+ * it. This seems to suggest that the workaround is insufficient.
+ */
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd &= ~PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+
+ /* Important! -- Make sure register accesses are byteswapped
+ * correctly. Also, for those chips that require it, make
+ * sure that indirect register accesses are enabled before
+ * the first operation.
+ */
+ pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ &misc_ctrl_reg);
+ tp->misc_host_ctrl |= (misc_ctrl_reg &
+ MISC_HOST_CTRL_CHIPREV);
+ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ tp->misc_host_ctrl);
+
+ tg3_detect_asic_rev(tp, misc_ctrl_reg);
+
/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
* we need to disable memory and use config. cycles
* only to access all registers. The 5702/03 chips
@@ -14003,9 +14122,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
* Any tg3 device found behind the bridge will also need the 40-bit
* DMA workaround.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
- tg3_flag_set(tp, 5780_CLASS);
+ if (tg3_flag(tp, 5780_CLASS)) {
tg3_flag_set(tp, 40BIT_DMA_BUG);
tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
} else {
@@ -14031,39 +14148,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
tp->pdev_peer = tg3_find_peer(tp);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
- tg3_flag_set(tp, 5717_PLUS);
-
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
- tg3_flag_set(tp, 57765_CLASS);
-
- if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
- tg3_flag_set(tp, 57765_PLUS);
-
- /* Intentionally exclude ASIC_REV_5906 */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- tg3_flag(tp, 57765_PLUS))
- tg3_flag_set(tp, 5755_PLUS);
-
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
- tg3_flag(tp, 5755_PLUS) ||
- tg3_flag(tp, 5780_CLASS))
- tg3_flag_set(tp, 5750_PLUS);
-
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
- tg3_flag(tp, 5750_PLUS))
- tg3_flag_set(tp, 5705_PLUS);
-
/* Determine TSO capabilities */
if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
; /* Do nothing. HW bug. */
@@ -14135,8 +14219,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
- tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
@@ -14160,12 +14242,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, PCI_EXPRESS);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
- int readrq = pcie_get_readrq(tp->pdev);
- if (readrq > 2048)
- pcie_set_readrq(tp->pdev, 2048);
- }
-
pci_read_config_word(tp->pdev,
pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
&lnkctl);
@@ -14395,13 +14471,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_ape_lock_init(tp);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- tg3_flag(tp, 57765_PLUS))
- tg3_flag_set(tp, CPMU_PRESENT);
-
/* Set up tp->grc_local_ctrl before calling
* tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
* will bring 5700's external PHY out of reset.
@@ -15336,34 +15405,6 @@ static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
return str;
}
-static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
-{
- struct pci_dev *peer;
- unsigned int func, devnr = tp->pdev->devfn & ~7;
-
- for (func = 0; func < 8; func++) {
- peer = pci_get_slot(tp->pdev->bus, devnr | func);
- if (peer && peer != tp->pdev)
- break;
- pci_dev_put(peer);
- }
- /* 5704 can be configured in single-port mode, set peer to
- * tp->pdev in that case.
- */
- if (!peer) {
- peer = tp->pdev;
- return peer;
- }
-
- /*
- * We don't need to keep the refcount elevated; there's no way
- * to remove one half of this device without removing the other
- */
- pci_dev_put(peer);
-
- return peer;
-}
-
static void __devinit tg3_init_coal(struct tg3 *tp)
{
struct ethtool_coalesce *ec = &tp->coal;
@@ -15395,39 +15436,6 @@ static void __devinit tg3_init_coal(struct tg3 *tp)
}
}
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
-{
- struct tg3 *tp = netdev_priv(dev);
-
- if (!tp->hw_stats)
- return &tp->net_stats_prev;
-
- spin_lock_bh(&tp->lock);
- tg3_get_nstats(tp, stats);
- spin_unlock_bh(&tp->lock);
-
- return stats;
-}
-
-static const struct net_device_ops tg3_netdev_ops = {
- .ndo_open = tg3_open,
- .ndo_stop = tg3_close,
- .ndo_start_xmit = tg3_start_xmit,
- .ndo_get_stats64 = tg3_get_stats64,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = tg3_set_rx_mode,
- .ndo_set_mac_address = tg3_set_mac_addr,
- .ndo_do_ioctl = tg3_ioctl,
- .ndo_tx_timeout = tg3_tx_timeout,
- .ndo_change_mtu = tg3_change_mtu,
- .ndo_fix_features = tg3_fix_features,
- .ndo_set_features = tg3_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = tg3_poll_controller,
-#endif
-};
-
static int __devinit tg3_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -15472,7 +15480,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
if (!dev) {
- dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto err_out_power_down;
}
@@ -15729,6 +15736,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tg3_frob_aux_power(tp, false);
}
+ tg3_timer_init(tp);
+
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting\n");
@@ -15854,7 +15863,7 @@ static int tg3_suspend(struct device *device)
tg3_phy_stop(tp);
tg3_netif_stop(tp);
- del_timer_sync(&tp->timer);
+ tg3_timer_stop(tp);
tg3_full_lock(tp, 1);
tg3_disable_ints(tp);
@@ -15878,8 +15887,7 @@ static int tg3_suspend(struct device *device)
if (err2)
goto out;
- tp->timer.expires = jiffies + tp->timer_offset;
- add_timer(&tp->timer);
+ tg3_timer_start(tp);
netif_device_attach(dev);
tg3_netif_start(tp);
@@ -15913,8 +15921,7 @@ static int tg3_resume(struct device *device)
if (err)
goto out;
- tp->timer.expires = jiffies + tp->timer_offset;
- add_timer(&tp->timer);
+ tg3_timer_start(tp);
tg3_netif_start(tp);
@@ -15962,11 +15969,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
tg3_netif_stop(tp);
- del_timer_sync(&tp->timer);
+ tg3_timer_stop(tp);
/* Want to make sure that the reset task doesn't run */
tg3_reset_task_cancel(tp);
- tg3_flag_clear(tp, TX_RECOVERY_PENDING);
netif_device_detach(netdev);
@@ -16059,8 +16065,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
- tp->timer.expires = jiffies + tp->timer_offset;
- add_timer(&tp->timer);
+ tg3_timer_start(tp);
tg3_netif_start(tp);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index aea8f72c24fa..66bcfca55261 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2011 Broadcom Corporation.
+ * Copyright (C) 2007-2012 Broadcom Corporation.
*/
#ifndef _T3_H
@@ -2702,19 +2702,8 @@ struct tg3_link_config {
u8 active_flowctrl;
u8 active_duplex;
-#define SPEED_INVALID 0xffff
-#define DUPLEX_INVALID 0xff
-#define AUTONEG_INVALID 0xff
u16 active_speed;
u32 rmt_adv;
-
- /* When we go in and out of low power mode we need
- * to swap with this state.
- */
- u16 orig_speed;
- u8 orig_duplex;
- u8 orig_autoneg;
- u32 orig_advertising;
};
struct tg3_bufmgr_config {
@@ -3075,6 +3064,7 @@ struct tg3 {
struct mii_bus *mdio_bus;
int mdio_irq[PHY_MAX_ADDR];
+ int old_link;
u8 phy_addr;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 29f284f79e02..689e5e19cc0b 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -203,7 +203,7 @@ bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
if (!bfa_nw_ioc_is_operational(cee->ioc))
return BFA_STATUS_IOC_FAILURE;
- if (cee->get_attr_pending == true)
+ if (cee->get_attr_pending)
return BFA_STATUS_DEVBUSY;
cee->get_attr_pending = true;
@@ -272,7 +272,7 @@ bfa_cee_notify(void *arg, enum bfa_ioc_event event)
switch (event) {
case BFA_IOC_E_DISABLED:
case BFA_IOC_E_FAILED:
- if (cee->get_attr_pending == true) {
+ if (cee->get_attr_pending) {
cee->get_attr_status = BFA_STATUS_FAILED;
cee->get_attr_pending = false;
if (cee->cbfn.get_attr_cbfn) {
@@ -281,7 +281,7 @@ bfa_cee_notify(void *arg, enum bfa_ioc_event event)
BFA_STATUS_FAILED);
}
}
- if (cee->get_stats_pending == true) {
+ if (cee->get_stats_pending) {
cee->get_stats_status = BFA_STATUS_FAILED;
cee->get_stats_pending = false;
if (cee->cbfn.get_stats_cbfn) {
@@ -290,7 +290,7 @@ bfa_cee_notify(void *arg, enum bfa_ioc_event event)
BFA_STATUS_FAILED);
}
}
- if (cee->reset_stats_pending == true) {
+ if (cee->reset_stats_pending) {
cee->reset_stats_status = BFA_STATUS_FAILED;
cee->reset_stats_pending = false;
if (cee->cbfn.reset_stats_cbfn) {
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 871c6309334c..48f877337390 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -297,6 +297,7 @@ enum bfa_mode {
#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */
#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */
#define BFA_TOTAL_FLASH_SIZE 0x400000
+#define BFA_FLASH_PART_FWIMG 2
#define BFA_FLASH_PART_MFG 7
/*
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index abfad275b5f3..77977d735dd7 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -692,7 +692,7 @@ static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
/* Call only the first time sm enters fwmismatch state. */
- if (iocpf->fw_mismatch_notified == false)
+ if (!iocpf->fw_mismatch_notified)
bfa_ioc_pf_fwmismatch(iocpf->ioc);
iocpf->fw_mismatch_notified = true;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index be7d91e4b785..ff78f770dec9 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3284,7 +3284,6 @@ bnad_pci_probe(struct pci_dev *pdev,
*/
netdev = alloc_etherdev(sizeof(struct bnad));
if (!netdev) {
- dev_err(&pdev->dev, "netdev allocation failed\n");
err = -ENOMEM;
return err;
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 592ad3929f53..c9fdceb135f3 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -62,8 +62,6 @@ bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
if (!fw_debug->debug_buffer) {
kfree(fw_debug);
fw_debug = NULL;
- pr_warn("bna %s: Failed to allocate fwtrc buffer\n",
- pci_name(bnad->pcidev));
return -ENOMEM;
}
@@ -105,8 +103,6 @@ bnad_debugfs_open_fwsave(struct inode *inode, struct file *file)
if (!fw_debug->debug_buffer) {
kfree(fw_debug);
fw_debug = NULL;
- pr_warn("bna %s: Failed to allocate fwsave buffer\n",
- pci_name(bnad->pcidev));
return -ENOMEM;
}
@@ -208,8 +204,6 @@ bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file)
if (!drv_info->debug_buffer) {
kfree(drv_info);
drv_info = NULL;
- pr_warn("bna %s: Failed to allocate drv info buffer\n",
- pci_name(bnad->pcidev));
return -ENOMEM;
}
@@ -348,11 +342,8 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
/* Allocate memory to store the user space buf */
kern_buf = kzalloc(nbytes, GFP_KERNEL);
- if (!kern_buf) {
- pr_warn("bna %s: Failed to allocate user buffer\n",
- pci_name(bnad->pcidev));
+ if (!kern_buf)
return -ENOMEM;
- }
if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
kfree(kern_buf);
@@ -373,11 +364,8 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
bnad->reglen = 0;
bnad->regdata = kzalloc(len << 2, GFP_KERNEL);
- if (!bnad->regdata) {
- pr_warn("bna %s: Failed to allocate regrd buffer\n",
- pci_name(bnad->pcidev));
+ if (!bnad->regdata)
return -ENOMEM;
- }
bnad->reglen = len << 2;
rb = bfa_ioc_bar0(ioc);
@@ -421,11 +409,8 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
/* Allocate memory to store the user space buf */
kern_buf = kzalloc(nbytes, GFP_KERNEL);
- if (!kern_buf) {
- pr_warn("bna %s: Failed to allocate user buffer\n",
- pci_name(bnad->pcidev));
+ if (!kern_buf)
return -ENOMEM;
- }
if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
kfree(kern_buf);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 803ea32aa99d..ab753d7334a6 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -1070,6 +1070,47 @@ done:
return ret;
}
+static int
+bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bnad_iocmd_comp fcomp;
+ const struct firmware *fw;
+ int ret = 0;
+
+ ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev);
+ if (ret) {
+ pr_err("BNA: Can't locate firmware %s\n", eflash->data);
+ goto out;
+ }
+
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irq(&bnad->bna_lock);
+ ret = bfa_nw_flash_update_part(&bnad->bna.flash, BFA_FLASH_PART_FWIMG,
+ bnad->id, (u8 *)fw->data, fw->size, 0,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ pr_warn("BNA: Flash update failed with err: %d\n", ret);
+ ret = -EIO;
+ spin_unlock_irq(&bnad->bna_lock);
+ goto out;
+ }
+
+ spin_unlock_irq(&bnad->bna_lock);
+ wait_for_completion(&fcomp.comp);
+ if (fcomp.comp_status != BFA_STATUS_OK) {
+ ret = -EIO;
+ pr_warn("BNA: Firmware image update to flash failed with: %d\n",
+ fcomp.comp_status);
+ }
+out:
+ release_firmware(fw);
+ return ret;
+}
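bnad_flash_device() gives bna an ethtool .flash_device hook: user space supplies a firmware file name, the driver fetches it with request_firmware() (so the name is resolved under /lib/firmware) and writes it to the FWIMG flash partition, blocking on a completion until the asynchronous flash update finishes. This is normally driven with ethtool's --flash option; the sketch below shows the underlying ETHTOOL_FLASHDEV ioctl, assuming only the standard ethtool ABI rather than anything bna-specific.

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Equivalent of "ethtool --flash <ifname> <fw_name>"; fw_name is relative
 * to the firmware search path (typically /lib/firmware) on the kernel side.
 */
int flash_nic(const char *ifname, const char *fw_name)
{
	struct ethtool_flash efl = { .cmd = ETHTOOL_FLASHDEV };
	struct ifreq ifr;
	int fd, ret;

	efl.region = ETHTOOL_FLASH_ALL_REGIONS;
	strncpy(efl.data, fw_name, sizeof(efl.data) - 1);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&efl;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, SIOCETHTOOL, &ifr);	/* 0 on success, -1/errno from
						 * bnad_flash_device() on failure */
	close(fd);
	return ret;
}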
+
static const struct ethtool_ops bnad_ethtool_ops = {
.get_settings = bnad_get_settings,
.set_settings = bnad_set_settings,
@@ -1088,6 +1129,7 @@ static const struct ethtool_ops bnad_ethtool_ops = {
.get_eeprom_len = bnad_get_eeprom_len,
.get_eeprom = bnad_get_eeprom,
.set_eeprom = bnad_set_eeprom,
+ .flash_device = bnad_flash_device,
};
void
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 1a5b6efa0120..906117016fc4 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -886,7 +886,7 @@ static void at91ether_rx(struct net_device *dev)
while (dlist->descriptors[lp->rxBuffIndex].addr & EMAC_DESC_DONE) {
p_recv = dlist->recv_buf[lp->rxBuffIndex];
pktlen = dlist->descriptors[lp->rxBuffIndex].size & 0x7ff; /* Length of frame including FCS */
- skb = dev_alloc_skb(pktlen + 2);
+ skb = netdev_alloc_skb(dev, pktlen + 2);
if (skb != NULL) {
skb_reserve(skb, 2);
memcpy(skb_put(skb, pktlen), p_recv, pktlen);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 23200680d4c1..c4834c23be35 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -87,7 +87,7 @@ static void __init macb_get_hwaddr(struct macb *bp)
memcpy(bp->dev->dev_addr, addr, sizeof(addr));
} else {
netdev_info(bp->dev, "invalid hw address, using random\n");
- random_ether_addr(bp->dev->dev_addr);
+ eth_hw_addr_random(bp->dev);
}
}
@@ -397,7 +397,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
first_frag, last_frag, len);
- skb = dev_alloc_skb(len + RX_OFFSET);
+ skb = netdev_alloc_skb(bp->dev, len + RX_OFFSET);
if (!skb) {
bp->stats.rx_dropped++;
for (frag = first_frag; ; frag = NEXT_RX(frag)) {
@@ -1308,10 +1308,8 @@ static int __init macb_probe(struct platform_device *pdev)
err = -ENOMEM;
dev = alloc_etherdev(sizeof(*bp));
- if (!dev) {
- dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
+ if (!dev)
goto err_out;
- }
SET_NETDEV_DEV(dev, &pdev->dev);
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 1fce186a9031..11f667f6131a 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1012,7 +1012,7 @@ static int xgmac_open(struct net_device *dev)
* address using the following linux command:
* ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
if (!is_valid_ether_addr(dev->dev_addr)) {
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
netdev_dbg(priv->dev, "generated random MAC address %pM\n",
dev->dev_addr);
}
@@ -1482,6 +1482,7 @@ static int xgmac_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 857cc254cab8..63bfdd10bd6d 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2499,7 +2499,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/version.h b/drivers/net/ethernet/chelsio/cxgb3/version.h
index 8bda06e366c8..165bfb91487a 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/version.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/version.h
@@ -35,10 +35,10 @@
#define DRV_DESC "Chelsio T3 Network Driver"
#define DRV_NAME "cxgb3"
/* Driver version */
-#define DRV_VERSION "1.1.4-ko"
+#define DRV_VERSION "1.1.5-ko"
/* Firmware version */
#define FW_VERSION_MAJOR 7
-#define FW_VERSION_MINOR 10
+#define FW_VERSION_MINOR 12
#define FW_VERSION_MICRO 0
#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 9d76e59d9526..05ff076af06d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2811,7 +2811,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
struct port_info *pi = netdev_priv(dev);
if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
pi->xact_addr_filt, addr->sa_data, true, true);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index d963c1d57f71..25e3308fc9d8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1130,7 +1130,7 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
struct port_info *pi = netdev_priv(dev);
if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
addr->sa_data, true);
@@ -2596,8 +2596,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev = alloc_etherdev_mq(sizeof(struct port_info),
MAX_PORT_QSETS);
if (netdev == NULL) {
- dev_err(&pdev->dev, "cannot allocate netdev for"
- " port %d\n", port_id);
t4vf_free_vi(adapter, viid);
err = -ENOMEM;
goto err_free_dev;
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index f328da24c8fa..d5ff93653e4c 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -911,7 +911,7 @@ dma_rx(struct net_device *dev)
}
/* Malloc up new buffer. */
- skb = dev_alloc_skb(length + 2);
+ skb = netdev_alloc_skb(dev, length + 2);
if (skb == NULL) {
if (net_debug) /* I don't think we want to do this to a stressed system */
printk("%s: Memory squeeze, dropping packet.\n", dev->name);
@@ -1616,7 +1616,7 @@ net_rx(struct net_device *dev)
}
/* Malloc up new buffer. */
- skb = dev_alloc_skb(length + 2);
+ skb = netdev_alloc_skb(dev, length + 2);
if (skb == NULL) {
#if 0 /* Again, this seems a cruel thing to do */
printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 4317af8d2f0a..78c55213eaf7 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -282,7 +282,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
if (rstat0 & RSTAT0_CRCI)
length -= 4;
- skb = dev_alloc_skb(length + 2);
+ skb = netdev_alloc_skb(dev, length + 2);
if (likely(skb != NULL)) {
struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
skb_reserve(skb, 2);
@@ -859,7 +859,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
ep->mdc_divisor = 40; /* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. */
if (is_zero_ether_addr(dev->dev_addr))
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
err = register_netdev(dev);
if (err) {
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 83781f316d1f..932fdccc339a 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -591,11 +591,15 @@ static void set_multicast_list(struct net_device *dev)
static int set_mac_address(struct net_device *dev, void *addr)
{
+ struct sockaddr *saddr = addr;
int i;
- printk("%s: Setting MAC address to ", dev->name);
- for (i = 0; i < 6; i++)
- printk(" %2.2x", dev->dev_addr[i] = ((unsigned char *)addr)[i]);
- printk(".\n");
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+ printk("%s: Setting MAC address to %pM\n", dev->name, dev->dev_addr);
+
/* set the Ethernet address */
for (i=0; i < ETH_ALEN/2; i++)
writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index c52295cd05ef..afe9b1662b8c 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -32,13 +32,13 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.1.1.31"
+#define DRV_VERSION "2.1.1.39"
#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
#define ENIC_WQ_MAX 1
-#define ENIC_RQ_MAX 1
+#define ENIC_RQ_MAX 8
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 0e4edd3b6bee..77b4e873f91c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -865,6 +865,7 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
}
memcpy(netdev->dev_addr, addr, netdev->addr_len);
+ netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0;
}
@@ -1068,9 +1069,18 @@ static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (err)
return err;
- if (is_valid_ether_addr(mac)) {
- memcpy(pp->vf_mac, mac, ETH_ALEN);
- return 0;
+ if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
+ if (vf == PORT_SELF_VF) {
+ memcpy(pp->vf_mac, mac, ETH_ALEN);
+ return 0;
+ } else {
+ /*
+ * For sriov vf's set the mac in hw
+ */
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
+ vnic_dev_set_mac_addr, mac);
+ return enic_dev_status_to_errno(err);
+ }
} else
return -EINVAL;
}
@@ -1114,12 +1124,23 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
}
- /* Special case handling: mac came from IFLA_VF_MAC */
- if (!is_zero_ether_addr(prev_pp.vf_mac))
- memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);
+ if (vf == PORT_SELF_VF) {
+ /* Special case handling: mac came from IFLA_VF_MAC */
+ if (!is_zero_ether_addr(prev_pp.vf_mac))
+ memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);
- if (vf == PORT_SELF_VF && is_zero_ether_addr(netdev->dev_addr))
- random_ether_addr(netdev->dev_addr);
+ if (is_zero_ether_addr(netdev->dev_addr))
+ eth_hw_addr_random(netdev);
+ } else {
+ /* SR-IOV VF: get mac from adapter */
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
+ vnic_dev_get_mac_addr, pp->mac_addr);
+ if (err) {
+ netdev_err(netdev, "Error getting mac for vf %d\n", vf);
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return enic_dev_status_to_errno(err);
+ }
+ }
err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
if (err) {
@@ -1147,7 +1168,8 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
}
}
- memset(pp->vf_mac, 0, ETH_ALEN);
+ if (vf == PORT_SELF_VF)
+ memset(pp->vf_mac, 0, ETH_ALEN);
return err;
}
@@ -2280,10 +2302,8 @@ static int __devinit enic_probe(struct pci_dev *pdev,
*/
netdev = alloc_etherdev(sizeof(struct enic));
- if (!netdev) {
- pr_err("Etherdev alloc failed, aborting\n");
+ if (!netdev)
return -ENOMEM;
- }
pci_set_drvdata(pdev, netdev);
@@ -2388,7 +2408,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
/* Allocate structure for port profiles */
enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
if (!enic->pp) {
- pr_err("port profile alloc failed, aborting\n");
err = -ENOMEM;
goto err_out_disable_sriov_pp;
}
@@ -2433,7 +2452,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
* called later by an upper layer.
*/
- if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) {
+ if (!enic_is_dynamic(enic)) {
err = vnic_dev_init(enic->vdev, 0);
if (err) {
dev_err(dev, "vNIC dev init failed, aborting\n");
@@ -2466,11 +2485,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
enic->port_mtu = enic->config.mtu;
(void)enic_change_mtu(netdev, enic->port_mtu);
-#ifdef CONFIG_PCI_IOV
- if (enic_is_sriov_vf(enic) && is_zero_ether_addr(enic->mac_addr))
- random_ether_addr(enic->mac_addr);
-#endif
-
err = enic_set_mac_addr(netdev, enic->mac_addr);
if (err) {
dev_err(dev, "Invalid MAC address, aborting\n");
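Two themes recur in the enic_main.c hunks: random_ether_addr() calls are replaced by eth_hw_addr_random(), which also flags the address as randomly assigned, and enic_set_mac_addr() clears that flag again once a real address is programmed; the allocation-failure printks go away because the core allocators already log OOM failures. A rough approximation of what the helper does for the caller (the authoritative version lives in etherdevice.h; treat this as a sketch, not a copy):

#include <linux/etherdevice.h>

static inline void example_hw_addr_random(struct net_device *dev)
{
	dev->addr_assign_type |= NET_ADDR_RANDOM;	/* mark the address as generated */
	random_ether_addr(dev->dev_addr);		/* valid, locally administered MAC */
}

enic_set_mac_addr() above then does the inverse bookkeeping, clearing NET_ADDR_RANDOM once userspace or the port profile supplies a real address.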
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index c347b6236f8f..dafea1ecb7b1 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -207,7 +207,7 @@ static int enic_pp_disassociate(struct enic *enic, int vf,
if (!is_zero_ether_addr(pp->mac_addr))
ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_del_addr,
pp->mac_addr);
- else if (!is_zero_ether_addr(netdev->dev_addr))
+ else if (vf == PORT_SELF_VF && !is_zero_ether_addr(netdev->dev_addr))
ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_del_addr,
netdev->dev_addr);
@@ -294,7 +294,7 @@ static int enic_pp_associate(struct enic *enic, int vf,
if (!is_zero_ether_addr(pp->mac_addr))
ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_add_addr,
pp->mac_addr);
- else if (!is_zero_ether_addr(netdev->dev_addr))
+ else if (vf == PORT_SELF_VF && !is_zero_ether_addr(netdev->dev_addr))
ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_add_addr,
netdev->dev_addr);
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 4a35367de790..31d658880c3c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -44,7 +44,7 @@ int enic_get_vnic_config(struct enic *enic)
struct vnic_enet_config *c = &enic->config;
int err;
- err = vnic_dev_mac_addr(enic->vdev, enic->mac_addr);
+ err = vnic_dev_get_mac_addr(enic->vdev, enic->mac_addr);
if (err) {
dev_err(enic_get_dev(enic),
"Error getting MAC addr, %d\n", err);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 31e7f9bc2067..605b22283be1 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -439,11 +439,12 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
a1 = sizeof(struct vnic_devcmd_fw_info);
/* only get fw_info once and cache it */
- err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
- if (err == ERR_ECMDUNKNOWN) {
+ if (vnic_dev_capable(vdev, CMD_MCPU_FW_INFO))
+ err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
+ &a0, &a1, wait);
+ else
err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
&a0, &a1, wait);
- }
}
*fw_info = vdev->fw_info;
@@ -504,13 +505,11 @@ int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
- int err;
- err = vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
- if (err == ERR_ECMDUNKNOWN)
+ if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
+ return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
+ else
return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
-
- return err;
}
int vnic_dev_disable(struct vnic_dev *vdev)
@@ -574,16 +573,15 @@ int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
int wait = 1000;
int err;
- err = vnic_dev_cmd(vdev, CMD_HANG_RESET, &a0, &a1, wait);
- if (err == ERR_ECMDUNKNOWN) {
+ if (vnic_dev_capable(vdev, CMD_HANG_RESET)) {
+ return vnic_dev_cmd(vdev, CMD_HANG_RESET,
+ &a0, &a1, wait);
+ } else {
err = vnic_dev_soft_reset(vdev, arg);
if (err)
return err;
-
return vnic_dev_init(vdev, 0);
}
-
- return err;
}
int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
@@ -594,11 +592,13 @@ int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
*done = 0;
- err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS, &a0, &a1, wait);
- if (err) {
- if (err == ERR_ECMDUNKNOWN)
- return vnic_dev_soft_reset_done(vdev, done);
- return err;
+ if (vnic_dev_capable(vdev, CMD_HANG_RESET_STATUS)) {
+ err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS,
+ &a0, &a1, wait);
+ if (err)
+ return err;
+ } else {
+ return vnic_dev_soft_reset_done(vdev, done);
}
*done = (a0 == 0);
@@ -613,7 +613,7 @@ int vnic_dev_hang_notify(struct vnic_dev *vdev)
return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}
-int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
+int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
u64 a0, a1;
int wait = 1000;
@@ -622,7 +622,7 @@ int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = 0;
- err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
if (err)
return err;
@@ -691,13 +691,12 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
{
u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
int wait = 1000;
- int err;
- err = vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, &a0, &a1, wait);
- if (err == ERR_ECMDUNKNOWN)
+ if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
+ return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
+ &a0, &a1, wait);
+ else
return 0;
-
- return err;
}
static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
@@ -804,7 +803,7 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
/* Emulate these for old CMD_INIT_v1 which
* didn't pass a0 so no CMD_INITF_*.
*/
- vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+ vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
}
}
@@ -835,7 +834,10 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
memset(vdev->args, 0, sizeof(vdev->args));
- err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait);
+ if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
+ err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait);
+ else
+ err = ERR_ECMDUNKNOWN;
/* Use defaults when firmware doesn't support the devcmd at all or
* supports it for only specific hardware
@@ -848,9 +850,11 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
return 0;
}
- vdev->intr_coal_timer_info.mul = (u32) vdev->args[0];
- vdev->intr_coal_timer_info.div = (u32) vdev->args[1];
- vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2];
+ if (!err) {
+ vdev->intr_coal_timer_info.mul = (u32) vdev->args[0];
+ vdev->intr_coal_timer_info.div = (u32) vdev->args[1];
+ vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2];
+ }
return err;
}
@@ -1019,3 +1023,15 @@ int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
{
return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
}
+
+int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ ((u8 *)&a0)[i] = mac_addr[i];
+
+ return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait);
+}
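All of the vnic_dev.c conversions above follow one pattern: instead of issuing a devcmd and special-casing an ERR_ECMDUNKNOWN return, the driver asks vnic_dev_capable() up front and picks the new or legacy command accordingly. Condensed to its essentials (the body mirrors vnic_dev_enable_wait() above; it is an illustration, not an extra function in the driver):

static int example_enable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))		/* firmware knows the new cmd */
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);

	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);	/* legacy fallback */
}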
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 6a138b625d13..f3d9b79ba77e 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -97,7 +97,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti);
int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
-int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
+int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
int vnic_dev_notify_unset(struct vnic_dev *vdev);
int vnic_dev_link_status(struct vnic_dev *vdev);
@@ -131,5 +131,6 @@ int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len);
int vnic_dev_enable2(struct vnic_dev *vdev, int active);
int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
+int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index 8025e8808d61..23d555255cf8 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -131,7 +131,7 @@ enum vnic_devcmd_cmd {
CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
/* MAC address in (u48)a0 */
- CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
+ CMD_GET_MAC_ADDR = _CMDC(_CMD_DIR_READ,
_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
/* add addr from (u48)a0 */
@@ -337,6 +337,15 @@ enum vnic_devcmd_cmd {
* (u32)a2 = maximum timer value in usec
*/
CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50),
+
+ /*
+ * cmd_set_mac_addr
+ * set mac address
+ * in:
+ * (u48)a0 = mac addr
+ *
+ */
+ CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55),
};
/* CMD_ENABLE2 flags */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index 34105e0951a5..7e1488fc8ab2 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -38,10 +38,8 @@ static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
for (i = 0; i < blks; i++) {
rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
- if (!rq->bufs[i]) {
- pr_err("Failed to alloc rq_bufs\n");
+ if (!rq->bufs[i])
return -ENOMEM;
- }
}
for (i = 0; i < blks; i++) {
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index df61bd932ea6..5e0d7a2be9bc 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -38,10 +38,8 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
for (i = 0; i < blks; i++) {
wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
- if (!wq->bufs[i]) {
- pr_err("Failed to alloc wq_bufs\n");
+ if (!wq->bufs[i])
return -ENOMEM;
- }
}
for (i = 0; i < blks; i++) {
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index f801754c71a7..36499d5edd95 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1028,7 +1028,7 @@ dm9000_rx(struct net_device *dev)
/* Move data from DM9000 */
if (GoodPacket &&
- ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
+ ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
skb_reserve(skb, 2);
rdptr = (u8 *) skb_put(skb, RxLen - 4);
@@ -1373,10 +1373,8 @@ dm9000_probe(struct platform_device *pdev)
/* Init network device */
ndev = alloc_etherdev(sizeof(struct board_info));
- if (!ndev) {
- dev_err(&pdev->dev, "could not allocate device.\n");
+ if (!ndev)
return -ENOMEM;
- }
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -1587,7 +1585,7 @@ dm9000_probe(struct platform_device *pdev)
dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
"set using ifconfig\n", ndev->name);
- random_ether_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
mac_src = "random";
}
diff --git a/drivers/net/ethernet/dec/ewrk3.c b/drivers/net/ethernet/dec/ewrk3.c
index f9df5e4d0341..1879f84a25a3 100644
--- a/drivers/net/ethernet/dec/ewrk3.c
+++ b/drivers/net/ethernet/dec/ewrk3.c
@@ -986,8 +986,10 @@ static int ewrk3_rx(struct net_device *dev)
dev->stats.rx_fifo_errors++;
} else {
struct sk_buff *skb;
+ skb = netdev_alloc_skb(dev,
+ pkt_len + 2);
- if ((skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ if (skb != NULL) {
unsigned char *p;
skb_reserve(skb, 2); /* Align to 16 bytes */
p = skb_put(skb, pkt_len);
diff --git a/drivers/net/ethernet/dec/tulip/21142.c b/drivers/net/ethernet/dec/tulip/21142.c
index 25b8deedbef8..369858272650 100644
--- a/drivers/net/ethernet/dec/tulip/21142.c
+++ b/drivers/net/ethernet/dec/tulip/21142.c
@@ -1,5 +1,5 @@
/*
- drivers/net/tulip/21142.c
+ drivers/net/ethernet/dec/tulip/21142.c
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 1eb46a0bb488..68f1c39184df 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -439,7 +439,7 @@ static void de_rx (struct de_private *de)
rx_tail, status, len, copying_skb);
buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
- copy_skb = dev_alloc_skb (buflen);
+ copy_skb = netdev_alloc_skb(de->dev, buflen);
if (unlikely(!copy_skb)) {
de->net_stats.rx_dropped++;
drop = 1;
@@ -1283,12 +1283,10 @@ static int de_refill_rx (struct de_private *de)
for (i = 0; i < DE_RX_RING_SIZE; i++) {
struct sk_buff *skb;
- skb = dev_alloc_skb(de->rx_buf_sz);
+ skb = netdev_alloc_skb(de->dev, de->rx_buf_sz);
if (!skb)
goto err_out;
- skb->dev = de->dev;
-
de->rx_skb[i].mapping = pci_map_single(de->pdev,
skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
de->rx_skb[i].skb = skb;
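The de2104x.c refill hunk shows the full shape of the dev_alloc_skb() to netdev_alloc_skb() conversion that runs through the rest of this series: the allocator now takes the owning net_device and records it in the skb, so the manual skb->dev assignment disappears. A minimal refill step in that style, with placeholder names standing in for the per-driver equivalents:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_refill_one(struct net_device *dev, unsigned int buf_sz,
			      struct sk_buff **slot)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, buf_sz);

	if (!skb)
		return -ENOMEM;			/* retried on a later refill pass */
	skb_reserve(skb, NET_IP_ALIGN);		/* typically 2 bytes: align the IP header */
	*slot = skb;				/* note: no skb->dev = dev needed any more */
	return 0;
}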
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 4d71f5ae20c8..18b106cc6d2b 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -3598,7 +3598,7 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
struct sk_buff *ret;
u_long i=0, tmp;
- p = dev_alloc_skb(IEEE802_3_SZ + DE4X5_ALIGN + 2);
+ p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
if (!p) return NULL;
tmp = virt_to_bus(p->data);
@@ -3618,7 +3618,7 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
#else
if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */
- p = dev_alloc_skb(len + 2);
+ p = netdev_alloc_skb(dev, len + 2);
if (!p) return NULL;
skb_reserve(p, 2); /* Align */
@@ -5234,11 +5234,7 @@ de4x5_dbg_open(struct net_device *dev)
if (de4x5_debug & DEBUG_OPEN) {
printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
- printk("\tphysical address: ");
- for (i=0;i<6;i++) {
- printk("%2.2x:",(short)dev->dev_addr[i]);
- }
- printk("\n");
+ printk("\tphysical address: %pM\n", dev->dev_addr);
printk("Descriptor head addresses:\n");
printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
printk("Descriptor addresses:\nRX: ");
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 51f7542eb451..1eccf4945485 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -325,8 +325,8 @@ static irqreturn_t dmfe_interrupt(int , void *);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_dmfe (struct net_device *dev);
#endif
-static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
-static void allocate_rx_buffer(struct dmfe_board_info *);
+static void dmfe_descriptor_init(struct net_device *, unsigned long);
+static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, unsigned long);
static void send_filter_frame(struct DEVICE *);
static void dm9132_id_table(struct DEVICE *);
@@ -649,7 +649,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
db->op_mode = db->media_mode; /* Force Mode */
/* Initialize Transmit/Receive decriptor and CR3/4 */
- dmfe_descriptor_init(db, ioaddr);
+ dmfe_descriptor_init(dev, ioaddr);
/* Init CR6 to program DM910x operation */
update_cr6(db->cr6_data, ioaddr);
@@ -828,7 +828,7 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
/* reallocate rx descriptor buffer */
if (db->rx_avail_cnt<RX_DESC_CNT)
- allocate_rx_buffer(db);
+ allocate_rx_buffer(dev);
/* Free the transmitted descriptor */
if ( db->cr5_data & 0x01)
@@ -1008,7 +1008,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
/* Good packet, send to upper layer */
/* Shorst packet used new SKB */
if ((rxlen < RX_COPY_SIZE) &&
- ((newskb = dev_alloc_skb(rxlen + 2))
+ ((newskb = netdev_alloc_skb(dev, rxlen + 2))
!= NULL)) {
skb = newskb;
@@ -1364,8 +1364,9 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
* Using Chain structure, and allocate Tx/Rx buffer
*/
-static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
+static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
{
+ struct dmfe_board_info *db = netdev_priv(dev);
struct tx_desc *tmp_tx;
struct rx_desc *tmp_rx;
unsigned char *tmp_buf;
@@ -1421,7 +1422,7 @@ static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioadd
tmp_rx->next_rx_desc = db->first_rx_desc;
/* pre-allocate Rx buffer */
- allocate_rx_buffer(db);
+ allocate_rx_buffer(dev);
}
@@ -1551,15 +1552,16 @@ static void send_filter_frame(struct DEVICE *dev)
* As possible as allocate maxiumn Rx buffer
*/
-static void allocate_rx_buffer(struct dmfe_board_info *db)
+static void allocate_rx_buffer(struct net_device *dev)
{
+ struct dmfe_board_info *db = netdev_priv(dev);
struct rx_desc *rxptr;
struct sk_buff *skb;
rxptr = db->rx_insert_ptr;
while(db->rx_avail_cnt < RX_DESC_CNT) {
- if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
+ if ( ( skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE) ) == NULL )
break;
rxptr->rx_skb_ptr = skb; /* FIXME (?) */
rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index 14d5b611783d..ed7d1dcd9566 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -1,5 +1,5 @@
/*
- drivers/net/tulip/eeprom.c
+ drivers/net/ethernet/dec/tulip/eeprom.c
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 4fb8c8c0a420..28a5e425fecf 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -1,5 +1,5 @@
/*
- drivers/net/tulip/interrupt.c
+ drivers/net/ethernet/dec/tulip/interrupt.c
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
@@ -69,7 +69,8 @@ int tulip_refill_rx(struct net_device *dev)
struct sk_buff *skb;
dma_addr_t mapping;
- skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
+ skb = tp->rx_buffers[entry].skb =
+ netdev_alloc_skb(dev, PKT_BUF_SZ);
if (skb == NULL)
break;
@@ -77,7 +78,6 @@ int tulip_refill_rx(struct net_device *dev)
PCI_DMA_FROMDEVICE);
tp->rx_buffers[entry].mapping = mapping;
- skb->dev = dev; /* Mark as being used by this device. */
tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
refilled++;
}
@@ -202,7 +202,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
if (pkt_len < tulip_rx_copybreak &&
- (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
pci_dma_sync_single_for_cpu(tp->pdev,
tp->rx_buffers[entry].mapping,
@@ -428,7 +428,7 @@ static int tulip_rx(struct net_device *dev)
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
if (pkt_len < tulip_rx_copybreak &&
- (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
pci_dma_sync_single_for_cpu(tp->pdev,
tp->rx_buffers[entry].mapping,
diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c
index beeb17b52ad4..ae937c6749e7 100644
--- a/drivers/net/ethernet/dec/tulip/media.c
+++ b/drivers/net/ethernet/dec/tulip/media.c
@@ -1,5 +1,5 @@
/*
- drivers/net/tulip/media.c
+ drivers/net/ethernet/dec/tulip/media.c
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 9c16e4ad02a6..5364563c4378 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -1,5 +1,5 @@
/*
- drivers/net/tulip/pnic.c
+ drivers/net/ethernet/dec/tulip/pnic.c
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/ethernet/dec/tulip/pnic2.c b/drivers/net/ethernet/dec/tulip/pnic2.c
index 04a7e477eaff..5895fc43f6e0 100644
--- a/drivers/net/ethernet/dec/tulip/pnic2.c
+++ b/drivers/net/ethernet/dec/tulip/pnic2.c
@@ -1,5 +1,5 @@
/*
- drivers/net/tulip/pnic2.c
+ drivers/net/ethernet/dec/tulip/pnic2.c
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c
index 19078d28ffb9..768379b8aee9 100644
--- a/drivers/net/ethernet/dec/tulip/timer.c
+++ b/drivers/net/ethernet/dec/tulip/timer.c
@@ -1,5 +1,5 @@
/*
- drivers/net/tulip/timer.c
+ drivers/net/ethernet/dec/tulip/timer.c
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
index fb3887c18dc6..38431a155f09 100644
--- a/drivers/net/ethernet/dec/tulip/tulip.h
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -1,5 +1,5 @@
/*
- drivers/net/tulip/tulip.h
+ drivers/net/ethernet/dec/tulip/tulip.h
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 4eb0d76145c2..fea3641d9398 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -636,16 +636,15 @@ static void tulip_init_ring(struct net_device *dev)
dma_addr_t mapping;
/* Note the receive buffer must be longword aligned.
- dev_alloc_skb() provides 16 byte alignment. But do *not*
+ netdev_alloc_skb() provides 16 byte alignment. But do *not*
use skb_reserve() to align the IP header! */
- struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
+ struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
tp->rx_buffers[i].skb = skb;
if (skb == NULL)
break;
mapping = pci_map_single(tp->pdev, skb->data,
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
tp->rx_buffers[i].mapping = mapping;
- skb->dev = dev; /* Mark as being used by this device. */
tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
}
@@ -1424,10 +1423,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
/* alloc_etherdev ensures aligned and zeroed private structures */
dev = alloc_etherdev (sizeof (*tp));
- if (!dev) {
- pr_err("ether device alloc failed, aborting\n");
+ if (!dev)
return -ENOMEM;
- }
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 48b0b6566eef..fc4001f6a5e4 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -232,8 +232,8 @@ static irqreturn_t uli526x_interrupt(int, void *);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void uli526x_poll(struct net_device *dev);
#endif
-static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long);
-static void allocate_rx_buffer(struct uli526x_board_info *);
+static void uli526x_descriptor_init(struct net_device *, unsigned long);
+static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, unsigned long);
static void send_filter_frame(struct net_device *, int);
static u16 phy_read(unsigned long, u8, u8, u32);
@@ -549,7 +549,7 @@ static void uli526x_init(struct net_device *dev)
db->op_mode = db->media_mode; /* Force Mode */
/* Initialize Transmit/Receive decriptor and CR3/4 */
- uli526x_descriptor_init(db, ioaddr);
+ uli526x_descriptor_init(dev, ioaddr);
/* Init CR6 to program M526X operation */
update_cr6(db->cr6_data, ioaddr);
@@ -711,7 +711,7 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
/* reallocate rx descriptor buffer */
if (db->rx_avail_cnt<RX_DESC_CNT)
- allocate_rx_buffer(db);
+ allocate_rx_buffer(dev);
/* Free the transmitted descriptor */
if ( db->cr5_data & 0x01)
@@ -844,7 +844,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
/* Good packet, send to upper layer */
/* Shorst packet used new SKB */
if ((rxlen < RX_COPY_SIZE) &&
- (((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) {
+ (((new_skb = netdev_alloc_skb(dev, rxlen + 2)) != NULL))) {
skb = new_skb;
/* size less than COPY_SIZE, allocate a rxlen SKB */
skb_reserve(skb, 2); /* 16byte align */
@@ -1289,8 +1289,9 @@ static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * sk
* Using Chain structure, and allocate Tx/Rx buffer
*/
-static void uli526x_descriptor_init(struct uli526x_board_info *db, unsigned long ioaddr)
+static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr)
{
+ struct uli526x_board_info *db = netdev_priv(dev);
struct tx_desc *tmp_tx;
struct rx_desc *tmp_rx;
unsigned char *tmp_buf;
@@ -1343,7 +1344,7 @@ static void uli526x_descriptor_init(struct uli526x_board_info *db, unsigned long
tmp_rx->next_rx_desc = db->first_rx_desc;
/* pre-allocate Rx buffer */
- allocate_rx_buffer(db);
+ allocate_rx_buffer(dev);
}
@@ -1433,15 +1434,17 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
* As possible as allocate maxiumn Rx buffer
*/
-static void allocate_rx_buffer(struct uli526x_board_info *db)
+static void allocate_rx_buffer(struct net_device *dev)
{
+ struct uli526x_board_info *db = netdev_priv(dev);
struct rx_desc *rxptr;
struct sk_buff *skb;
rxptr = db->rx_insert_ptr;
while(db->rx_avail_cnt < RX_DESC_CNT) {
- if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
+ skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
+ if (skb == NULL)
break;
rxptr->rx_skb_ptr = skb; /* FIXME (?) */
rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 52da7b2fe3b6..2ac6fff0363a 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -815,7 +815,7 @@ static void init_rxtx_rings(struct net_device *dev)
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
np->rx_skbuff[i] = skb;
if (skb == NULL)
break;
@@ -1231,7 +1231,7 @@ static int netdev_rx(struct net_device *dev)
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
if (pkt_len < rx_copybreak &&
- (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
np->rx_skbuff[entry]->len,
@@ -1270,7 +1270,7 @@ static int netdev_rx(struct net_device *dev)
struct sk_buff *skb;
entry = np->dirty_rx % RX_RING_SIZE;
if (np->rx_skbuff[entry] == NULL) {
- skb = dev_alloc_skb(np->rx_buf_sz);
+ skb = netdev_alloc_skb(dev, np->rx_buf_sz);
np->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 988b8eb24d37..fdb329fe6e8e 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -222,10 +222,9 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
is available.
*/
dev = alloc_etherdev(sizeof(struct xircom_private));
- if (!dev) {
- pr_err("%s: failed to allocate etherdev\n", __func__);
+ if (!dev)
goto device_fail;
- }
+
private = netdev_priv(dev);
/* Allocate the send/receive buffers */
@@ -1085,7 +1084,7 @@ investigate_read_descriptor(struct net_device *dev, struct xircom_private *card,
pkt_len = 1518;
}
- skb = dev_alloc_skb(pkt_len + 2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
dev->stats.rx_dropped++;
goto out;
diff --git a/drivers/net/ethernet/dlink/de600.c b/drivers/net/ethernet/dlink/de600.c
index c24fab1e9cbe..682750c052c8 100644
--- a/drivers/net/ethernet/dlink/de600.c
+++ b/drivers/net/ethernet/dlink/de600.c
@@ -335,7 +335,7 @@ static void de600_rx_intr(struct net_device *dev)
return;
}
- skb = dev_alloc_skb(size+2);
+ skb = netdev_alloc_skb(dev, size + 2);
if (skb == NULL) {
printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
return;
diff --git a/drivers/net/ethernet/dlink/de620.c b/drivers/net/ethernet/dlink/de620.c
index 3b934ab784d3..afc5aaac6b60 100644
--- a/drivers/net/ethernet/dlink/de620.c
+++ b/drivers/net/ethernet/dlink/de620.c
@@ -650,7 +650,7 @@ static int de620_rx_intr(struct net_device *dev)
printk(KERN_WARNING "%s: Illegal packet size: %d!\n", dev->name, size);
}
else { /* Good packet? */
- skb = dev_alloc_skb(size+2);
+ skb = netdev_alloc_skb(dev, size + 2);
if (skb == NULL) { /* Yeah, but no place to put it... */
printk(KERN_WARNING "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
dev->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 28a3a9b50b8b..d783f4f96ec0 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1020,11 +1020,11 @@ static void init_ring(struct net_device *dev)
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + 2);
+ struct sk_buff *skb =
+ netdev_alloc_skb(dev, np->rx_buf_sz + 2);
np->rx_skbuff[i] = skb;
if (skb == NULL)
break;
- skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* 16 byte align the IP header. */
np->rx_ring[i].frag[0].addr = cpu_to_le32(
dma_map_single(&np->pci_dev->dev, skb->data,
@@ -1358,7 +1358,7 @@ static void rx_poll(unsigned long data)
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
if (pkt_len < rx_copybreak &&
- (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
dma_sync_single_for_cpu(&np->pci_dev->dev,
le32_to_cpu(desc->frag[0].addr),
@@ -1411,11 +1411,10 @@ static void refill_rx (struct net_device *dev)
struct sk_buff *skb;
entry = np->dirty_rx % RX_RING_SIZE;
if (np->rx_skbuff[entry] == NULL) {
- skb = dev_alloc_skb(np->rx_buf_sz + 2);
+ skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
np->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
- skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
np->rx_ring[entry].frag[0].addr = cpu_to_le32(
dma_map_single(&np->pci_dev->dev, skb->data,
@@ -1602,7 +1601,7 @@ static int sundance_set_mac_addr(struct net_device *dev, void *data)
const struct sockaddr *addr = data;
if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
__set_mac_addr(dev);
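The sundance.c error-code change returns -EADDRNOTAVAIL rather than -EINVAL when .ndo_set_mac_address is handed an unusable address, matching the generic eth_mac_addr() helper. The resulting handler shape, reduced to a sketch with illustrative names:

static int example_set_mac_address(struct net_device *dev, void *p)
{
	const struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;	/* the address itself is unusable */

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	/* program the new address into hardware here */
	return 0;
}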
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 925c9bafc9b9..b276469f74e9 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -421,7 +421,7 @@ static int dnet_poll(struct napi_struct *napi, int budget)
printk(KERN_ERR "%s packet receive error %x\n",
__func__, cmd_word);
- skb = dev_alloc_skb(pkt_len + 5);
+ skb = netdev_alloc_skb(dev, pkt_len + 5);
if (skb != NULL) {
/* Align IP on 16 byte boundaries */
skb_reserve(skb, 2);
@@ -854,10 +854,8 @@ static int __devinit dnet_probe(struct platform_device *pdev)
err = -ENOMEM;
dev = alloc_etherdev(sizeof(*bp));
- if (!dev) {
- dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
+ if (!dev)
goto err_out_release_mem;
- }
/* TODO: Actually, we have some interesting features... */
dev->features |= 0;
@@ -897,7 +895,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
if (!is_valid_ether_addr(dev->dev_addr)) {
/* choose a random ethernet address */
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
__dnet_set_hwaddr(bp);
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index cbdec2536da6..9576ac002c23 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -33,7 +33,7 @@
#include "be_hw.h"
-#define DRV_VER "4.0.100u"
+#define DRV_VER "4.2.116u"
#define DRV_NAME "be2net"
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@@ -52,6 +52,10 @@
#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */
#define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */
+#define OC_SUBSYS_DEVICE_ID1 0xE602
+#define OC_SUBSYS_DEVICE_ID2 0xE642
+#define OC_SUBSYS_DEVICE_ID3 0xE612
+#define OC_SUBSYS_DEVICE_ID4 0xE652
static inline char *nic_name(struct pci_dev *pdev)
{
@@ -74,11 +78,14 @@ static inline char *nic_name(struct pci_dev *pdev)
/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN ((u16) 64)
+/* allocate extra space to allow tunneling decapsulation without head reallocation */
+#define BE_RX_SKB_ALLOC_SIZE (BE_HDR_LEN + 64)
+
#define BE_MAX_JUMBO_FRAME_SIZE 9018
#define BE_MIN_MTU 256
#define BE_NUM_VLANS_SUPPORTED 64
-#define BE_MAX_EQD 96
+#define BE_MAX_EQD 96u
#define BE_MAX_TX_FRAG_COUNT 30
#define EVNT_Q_LEN 1024
@@ -89,12 +96,16 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
#define MCC_CQ_LEN 256
-#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
+#define BE3_MAX_RSS_QS 8
+#define BE2_MAX_RSS_QS 4
+#define MAX_RSS_QS BE3_MAX_RSS_QS
#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
+
#define MAX_TX_QS 8
-#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RX + TX */
+#define MAX_MSIX_VECTORS MAX_RSS_QS
+#define BE_TX_BUDGET 256
#define BE_NAPI_WEIGHT 64
-#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
+#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
#define FW_VER_LEN 32
@@ -162,13 +173,16 @@ struct be_eq_obj {
/* Adaptive interrupt coalescing (AIC) info */
bool enable_aic;
- u16 min_eqd; /* in usecs */
- u16 max_eqd; /* in usecs */
- u16 cur_eqd; /* in usecs */
- u8 eq_idx;
+ u32 min_eqd; /* in usecs */
+ u32 max_eqd; /* in usecs */
+ u32 eqd; /* configured val when aic is off */
+ u32 cur_eqd; /* in usecs */
+ u8 idx; /* array index */
+ u16 tx_budget;
struct napi_struct napi;
-};
+ struct be_adapter *adapter;
+} ____cacheline_aligned_in_smp;
struct be_mcc_obj {
struct be_queue_info q;
@@ -194,7 +208,7 @@ struct be_tx_obj {
/* Remember the skbs that were transmitted */
struct sk_buff *sent_skb_list[TX_Q_LEN];
struct be_tx_stats stats;
-};
+} ____cacheline_aligned_in_smp;
/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
@@ -212,8 +226,6 @@ struct be_rx_stats {
u32 rx_drops_no_skbs; /* skb allocation errors */
u32 rx_drops_no_frags; /* HW has no fetched frags */
u32 rx_post_fail; /* page post alloc failures */
- u32 rx_polls; /* NAPI calls */
- u32 rx_events;
u32 rx_compl;
u32 rx_mcast_pkts;
u32 rx_compl_err; /* completions with err set */
@@ -246,23 +258,19 @@ struct be_rx_obj {
struct be_queue_info cq;
struct be_rx_compl_info rxcp;
struct be_rx_page_info page_info_tbl[RX_Q_LEN];
- struct be_eq_obj rx_eq;
struct be_rx_stats stats;
u8 rss_id;
bool rx_post_starved; /* Zero rx frags have been posted to BE */
- u32 cache_line_barrier[16];
-};
+} ____cacheline_aligned_in_smp;
struct be_drv_stats {
u32 be_on_die_temperature;
- u32 tx_events;
u32 eth_red_drops;
u32 rx_drops_no_pbuf;
u32 rx_drops_no_txpb;
u32 rx_drops_no_erx_descr;
u32 rx_drops_no_tpre_descr;
u32 rx_drops_too_many_frags;
- u32 rx_drops_invalid_ring;
u32 forwarded_packets;
u32 rx_drops_mtu;
u32 rx_crc_errors;
@@ -273,7 +281,7 @@ struct be_drv_stats {
u32 rx_in_range_errors;
u32 rx_out_range_errors;
u32 rx_frame_too_long;
- u32 rx_address_match_errors;
+ u32 rx_address_mismatch_drops;
u32 rx_dropped_too_small;
u32 rx_dropped_too_short;
u32 rx_dropped_header_too_small;
@@ -295,11 +303,15 @@ struct be_vf_cfg {
unsigned char mac_addr[ETH_ALEN];
int if_handle;
int pmac_id;
+ u16 def_vid;
u16 vlan_tag;
u32 tx_rate;
};
#define BE_FLAGS_LINK_STATUS_INIT 1
+#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
+#define BE_UC_PMAC_COUNT 30
+#define BE_VF_UC_PMAC_COUNT 2
struct be_adapter {
struct pci_dev *pdev;
@@ -318,20 +330,19 @@ struct be_adapter {
spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
- struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
u32 num_msix_vec;
+ u32 num_evt_qs;
+ struct be_eq_obj eq_obj[MAX_MSIX_VECTORS];
+ struct msix_entry msix_entries[MAX_MSIX_VECTORS];
bool isr_registered;
/* TX Rings */
- struct be_eq_obj tx_eq;
+ u32 num_tx_qs;
struct be_tx_obj tx_obj[MAX_TX_QS];
- u8 num_tx_qs;
-
- u32 cache_line_break[8];
/* Rx rings */
- struct be_rx_obj rx_obj[MAX_RX_QS];
u32 num_rx_qs;
+ struct be_rx_obj rx_obj[MAX_RX_QS];
u32 big_page_size; /* Compounded page size shared by rx wrbs */
u8 eq_next_idx;
@@ -353,7 +364,7 @@ struct be_adapter {
/* Ethtool knobs and info */
char fw_ver[FW_VER_LEN];
int if_handle; /* Used to configure filtering */
- u32 pmac_id; /* MAC addr handle used by BE card */
+ u32 *pmac_id; /* MAC addr handle used by BE card */
u32 beacon_state; /* for set_phys_id */
bool eeh_err;
@@ -361,7 +372,6 @@ struct be_adapter {
bool fw_timeout;
u32 port_num;
bool promiscuous;
- bool wol;
u32 function_mode;
u32 function_caps;
u32 rx_fc; /* Rx flow control */
@@ -382,6 +392,10 @@ struct be_adapter {
u32 sli_family;
u8 hba_port_num;
u16 pvid;
+ u8 wol_cap;
+ bool wol;
+ u32 max_pmac_cnt; /* Max secondary UC MACs programmable */
+ u32 uc_macs; /* Count of secondary UC MAC programmed */
};
#define be_physfn(adapter) (!adapter->is_virtfn)
@@ -402,24 +416,34 @@ struct be_adapter {
extern const struct ethtool_ops be_ethtool_ops;
#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
-#define tx_stats(txo) (&txo->stats)
-#define rx_stats(rxo) (&rxo->stats)
+#define num_irqs(adapter) (msix_enabled(adapter) ? \
+ adapter->num_msix_vec : 1)
+#define tx_stats(txo) (&(txo)->stats)
+#define rx_stats(rxo) (&(rxo)->stats)
-#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
+/* The default RXQ is the last RXQ */
+#define default_rxo(adpt) (&adpt->rx_obj[adpt->num_rx_qs - 1])
#define for_all_rx_queues(adapter, rxo, i) \
for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
i++, rxo++)
-/* Just skip the first default non-rss queue */
+/* Skip the default non-rss queue (last one)*/
#define for_all_rss_queues(adapter, rxo, i) \
- for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
+ for (i = 0, rxo = &adapter->rx_obj[i]; i < (adapter->num_rx_qs - 1);\
i++, rxo++)
#define for_all_tx_queues(adapter, txo, i) \
for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \
i++, txo++)
+#define for_all_evt_queues(adapter, eqo, i) \
+ for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \
+ i++, eqo++)
+
+#define is_mcc_eqo(eqo) (eqo->idx == 0)
+#define mcc_eqo(adapter) (&adapter->eq_obj[0])
+
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
@@ -428,10 +452,6 @@ extern const struct ethtool_ops be_ethtool_ops;
((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
-/* Byte offset into the page corresponding to given address */
-#define OFFSET_IN_PAGE(addr) \
- ((size_t)(addr) & (PAGE_SIZE_4K-1))
-
/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field) \
(((size_t)&(((_struct *)0)->field))%32)
@@ -539,9 +559,28 @@ static inline bool be_error(struct be_adapter *adapter)
return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout;
}
+static inline bool be_is_wol_excluded(struct be_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (!be_physfn(adapter))
+ return true;
+
+ switch (pdev->subsystem_device) {
+ case OC_SUBSYS_DEVICE_ID1:
+ case OC_SUBSYS_DEVICE_ID2:
+ case OC_SUBSYS_DEVICE_ID3:
+ case OC_SUBSYS_DEVICE_ID4:
+ return true;
+ default:
+ return false;
+ }
+}
+
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
extern void be_parse_stats(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);
+extern bool be_is_wol_supported(struct be_adapter *adapter);
#endif /* BE_H */
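The be.h rework replaces the fixed tx_eq/rx_eq layout with an array of per-vector be_eq_obj structures — cacheline-aligned, each carrying its own NAPI context — plus for_all_evt_queues() to match the existing for_all_rx_queues() iterator style. A sketch of how the new iterator would be used from the .c side (the napi_enable() body is an assumption for illustration, not lifted from the driver):

static void example_enable_all_napi(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		napi_enable(&eqo->napi);	/* one NAPI context per event queue */
}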
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 0fcb45624796..67b030d72df1 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -235,10 +235,10 @@ void be_async_mcc_disable(struct be_adapter *adapter)
adapter->mcc_obj.rearm_cq = false;
}
-int be_process_mcc(struct be_adapter *adapter, int *status)
+int be_process_mcc(struct be_adapter *adapter)
{
struct be_mcc_compl *compl;
- int num = 0;
+ int num = 0, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
spin_lock_bh(&adapter->mcc_cq_lock);
@@ -252,32 +252,32 @@ int be_process_mcc(struct be_adapter *adapter, int *status)
be_async_grp5_evt_process(adapter,
compl->flags, compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
- *status = be_mcc_compl_process(adapter, compl);
+ status = be_mcc_compl_process(adapter, compl);
atomic_dec(&mcc_obj->q.used);
}
be_mcc_compl_use(compl);
num++;
}
+ if (num)
+ be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
+
spin_unlock_bh(&adapter->mcc_cq_lock);
- return num;
+ return status;
}
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout 120000 /* 12s timeout */
- int i, num, status = 0;
+ int i, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
for (i = 0; i < mcc_timeout; i++) {
if (be_error(adapter))
return -EIO;
- num = be_process_mcc(adapter, &status);
- if (num)
- be_cq_notify(adapter, mcc_obj->cq.id,
- mcc_obj->rearm_cq, num);
+ status = be_process_mcc(adapter);
if (atomic_read(&mcc_obj->q.used) == 0)
break;
@@ -726,9 +726,8 @@ err:
}
/* Uses Mbox */
-int be_cmd_cq_create(struct be_adapter *adapter,
- struct be_queue_info *cq, struct be_queue_info *eq,
- bool sol_evts, bool no_delay, int coalesce_wm)
+int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
+ struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_cq_create *req;
@@ -759,7 +758,6 @@ int be_cmd_cq_create(struct be_adapter *adapter,
ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
ctxt, eq->id);
- AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
} else {
AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
coalesce_wm);
@@ -768,11 +766,8 @@ int be_cmd_cq_create(struct be_adapter *adapter,
AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
__ilog2_u32(cq->len/256));
AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_be, solevent,
- ctxt, sol_evts);
AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
- AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
}
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -973,7 +968,7 @@ err:
/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
- u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
+ u32 if_id, u32 rss, u8 *rss_id)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_eth_rx_create *req;
@@ -997,7 +992,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
req->num_pages = 2;
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
req->interface_id = cpu_to_le32(if_id);
- req->max_frame_size = cpu_to_le16(max_frame_size);
+ req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
req->rss_queue = cpu_to_le32(rss);
status = be_mcc_notify_wait(adapter);
@@ -1257,11 +1252,13 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
}
req = embedded_payload(wrb);
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
+
if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
req->hdr.version = 1;
- be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
+ req->hdr.domain = dom;
status = be_mcc_notify_wait(adapter);
if (!status) {
@@ -1697,7 +1694,8 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
req->if_id = cpu_to_le32(adapter->if_handle);
- req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
+ req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
+ RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
memcpy(req->cpu_table, rsstable, table_size);
memcpy(req->hash, myhash, sizeof(myhash));
@@ -2298,52 +2296,81 @@ err:
/* Uses synchronous MCCQ */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
- u32 *pmac_id)
+ bool *pmac_id_active, u32 *pmac_id, u8 *mac)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_mac_list *req;
int status;
int mac_count;
+ struct be_dma_mem get_mac_list_cmd;
+ int i;
+
+ memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
+ get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
+ get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
+ get_mac_list_cmd.size,
+ &get_mac_list_cmd.dma);
+
+ if (!get_mac_list_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure during GET_MAC_LIST\n");
+ return -ENOMEM;
+ }
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
status = -EBUSY;
- goto err;
+ goto out;
}
- req = embedded_payload(wrb);
+
+ req = get_mac_list_cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
- wrb, NULL);
+ wrb, &get_mac_list_cmd);
req->hdr.domain = domain;
+ req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
+ req->perm_override = 1;
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_mac_list *resp =
- embedded_payload(wrb);
- int i;
- u8 *ctxt = &resp->context[0][0];
- status = -EIO;
- mac_count = resp->mac_count;
- be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
+ get_mac_list_cmd.va;
+ mac_count = resp->true_mac_count + resp->pseudo_mac_count;
+ /* The MAC list returned can contain one or more active mac_ids
+ * or one or more pseudo-permanent MAC addresses. If an active
+ * mac_id is present, return the first active mac_id found
+ */
for (i = 0; i < mac_count; i++) {
- if (!AMAP_GET_BITS(struct amap_get_mac_list_context,
- act, ctxt)) {
- *pmac_id = AMAP_GET_BITS
- (struct amap_get_mac_list_context,
- macid, ctxt);
- status = 0;
- break;
+ struct get_list_macaddr *mac_entry;
+ u16 mac_addr_size;
+ u32 mac_id;
+
+ mac_entry = &resp->macaddr_list[i];
+ mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
+ /* mac_id is a 32 bit value and mac_addr size
+ * is 6 bytes
+ */
+ if (mac_addr_size == sizeof(u32)) {
+ *pmac_id_active = true;
+ mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
+ *pmac_id = le32_to_cpu(mac_id);
+ goto out;
}
- ctxt += sizeof(struct amap_get_mac_list_context) / 8;
}
+ /* If no active mac_id found, return first pseudo mac addr */
+ *pmac_id_active = false;
+ memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
+ ETH_ALEN);
}
-err:
+out:
spin_unlock_bh(&adapter->mcc_lock);
+ pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
+ get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
}
@@ -2391,3 +2418,141 @@ err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
+
+int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
+ u32 domain, u16 intf_id)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_hsw_config *req;
+ void *ctxt;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+
+ req->hdr.domain = domain;
+ AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
+ if (pvid) {
+ AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
+ }
+
+ be_dws_cpu_to_le(req->context, sizeof(req->context));
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Get Hyper switch config */
+int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
+ u32 domain, u16 intf_id)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_hsw_config *req;
+ void *ctxt;
+ int status;
+ u16 vid;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+
+ req->hdr.domain = domain;
+ AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
+ intf_id);
+ AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
+ be_dws_cpu_to_le(req->context, sizeof(req->context));
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_hsw_config *resp =
+ embedded_payload(wrb);
+ be_dws_le_to_cpu(&resp->context,
+ sizeof(resp->context));
+ vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
+ pvid, &resp->context);
+ *pvid = le16_to_cpu(vid);
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_acpi_wol_magic_config_v1 *req;
+ int status;
+ int payload_len = sizeof(*req);
+ struct be_dma_mem cmd;
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
+ &cmd.dma);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure\n");
+ return -ENOMEM;
+ }
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = cmd.va;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
+ payload_len, wrb, &cmd);
+
+ req->hdr.version = 1;
+ req->query_options = BE_GET_WOL_CAP;
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
+ resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
+
+ /* the command could succeed misleadingly on old f/w
+ * which is not aware of the V1 version. fake an error. */
+ if (resp->hdr.response_length < payload_len) {
+ status = -1;
+ goto err;
+ }
+ adapter->wol_cap = resp->wol_settings;
+ }
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ return status;
+}
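Among the be_cmds.c changes, be_process_mcc() loses its out-parameter: it returns the completion status directly and arms the completion queue itself, so callers no longer do the notify-if-work-was-done step by hand. A condensed polling loop in the new style, following be_mcc_wait_compl() above with the timeout bookkeeping omitted:

#include <linux/delay.h>

static int example_wait_for_mcc(struct be_adapter *adapter)
{
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	int status;

	do {
		status = be_process_mcc(adapter);	/* CQ notify now happens inside */
		udelay(100);
	} while (atomic_read(&mcc_obj->q.used) != 0);

	return status;
}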
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index dca89249088f..d5b680c56af0 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -191,6 +191,8 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
#define OPCODE_COMMON_GET_MAC_LIST 147
#define OPCODE_COMMON_SET_MAC_LIST 148
+#define OPCODE_COMMON_GET_HSW_CONFIG 152
+#define OPCODE_COMMON_SET_HSW_CONFIG 153
#define OPCODE_COMMON_READ_OBJECT 171
#define OPCODE_COMMON_WRITE_OBJECT 172
@@ -592,8 +594,8 @@ struct be_port_rxf_stats_v0 {
u32 rx_in_range_errors; /* dword 10*/
u32 rx_out_range_errors; /* dword 11*/
u32 rx_frame_too_long; /* dword 12*/
- u32 rx_address_match_errors; /* dword 13*/
- u32 rx_vlan_mismatch; /* dword 14*/
+ u32 rx_address_mismatch_drops; /* dword 13*/
+ u32 rx_vlan_mismatch_drops; /* dword 14*/
u32 rx_dropped_too_small; /* dword 15*/
u32 rx_dropped_too_short; /* dword 16*/
u32 rx_dropped_header_too_small; /* dword 17*/
@@ -799,8 +801,8 @@ struct lancer_pport_stats {
u32 rx_control_frames_unknown_opcode_hi;
u32 rx_in_range_errors;
u32 rx_out_of_range_errors;
- u32 rx_address_match_errors;
- u32 rx_vlan_mismatch_errors;
+ u32 rx_address_mismatch_drops;
+ u32 rx_vlan_mismatch_drops;
u32 rx_dropped_too_small;
u32 rx_dropped_too_short;
u32 rx_dropped_header_too_small;
@@ -1206,6 +1208,33 @@ struct be_cmd_req_acpi_wol_magic_config{
u8 rsvd2[2];
} __packed;
+struct be_cmd_req_acpi_wol_magic_config_v1 {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[2];
+ u8 query_options;
+ u8 rsvd1[5];
+ u32 rsvd2[288];
+ u8 magic_mac[6];
+ u8 rsvd3[22];
+} __packed;
+
+struct be_cmd_resp_acpi_wol_magic_config_v1 {
+ struct be_cmd_resp_hdr hdr;
+ u8 rsvd0[2];
+ u8 wol_settings;
+ u8 rsvd1[5];
+ u32 rsvd2[295];
+} __packed;
+
+#define BE_GET_WOL_CAP 2
+
+#define BE_WOL_CAP 0x1
+#define BE_PME_D0_CAP 0x8
+#define BE_PME_D1_CAP 0x10
+#define BE_PME_D2_CAP 0x20
+#define BE_PME_D3HOT_CAP 0x40
+#define BE_PME_D3COLD_CAP 0x80
+
/********************** LoopBack test *********************/
struct be_cmd_req_loopback_test {
struct be_cmd_req_hdr hdr;
@@ -1346,22 +1375,36 @@ struct be_cmd_resp_set_func_cap {
/******************** GET/SET_MACLIST **************************/
#define BE_MAX_MAC 64
-struct amap_get_mac_list_context {
- u8 macid[31];
- u8 act;
-} __packed;
-
struct be_cmd_req_get_mac_list {
struct be_cmd_req_hdr hdr;
- u32 rsvd;
+ u8 mac_type;
+ u8 perm_override;
+ u16 iface_id;
+ u32 mac_id;
+ u32 rsvd[3];
+} __packed;
+
+struct get_list_macaddr {
+ u16 mac_addr_size;
+ union {
+ u8 macaddr[6];
+ struct {
+ u8 rsvd[2];
+ u32 mac_id;
+ } __packed s_mac_id;
+ } __packed mac_addr_id;
} __packed;
struct be_cmd_resp_get_mac_list {
struct be_cmd_resp_hdr hdr;
- u8 mac_count;
- u8 rsvd1;
- u16 rsvd2;
- u8 context[sizeof(struct amap_get_mac_list_context) / 8][BE_MAX_MAC];
+ struct get_list_macaddr fd_macaddr; /* Factory default mac */
+ struct get_list_macaddr macid_macaddr; /* soft mac */
+ u8 true_mac_count;
+ u8 pseudo_mac_count;
+ u8 mac_list_size;
+ u8 rsvd;
+ /* perm override mac */
+ struct get_list_macaddr macaddr_list[BE_MAX_MAC];
} __packed;
struct be_cmd_req_set_mac_list {
@@ -1372,6 +1415,55 @@ struct be_cmd_req_set_mac_list {
struct macaddr mac[BE_MAX_MAC];
} __packed;
+/*********************** HSW Config ***********************/
+struct amap_set_hsw_context {
+ u8 interface_id[16];
+ u8 rsvd0[14];
+ u8 pvid_valid;
+ u8 rsvd1;
+ u8 rsvd2[16];
+ u8 pvid[16];
+ u8 rsvd3[32];
+ u8 rsvd4[32];
+ u8 rsvd5[32];
+} __packed;
+
+struct be_cmd_req_set_hsw_config {
+ struct be_cmd_req_hdr hdr;
+ u8 context[sizeof(struct amap_set_hsw_context) / 8];
+} __packed;
+
+struct be_cmd_resp_set_hsw_config {
+ struct be_cmd_resp_hdr hdr;
+ u32 rsvd;
+};
+
+struct amap_get_hsw_req_context {
+ u8 interface_id[16];
+ u8 rsvd0[14];
+ u8 pvid_valid;
+ u8 pport;
+} __packed;
+
+struct amap_get_hsw_resp_context {
+ u8 rsvd1[16];
+ u8 pvid[16];
+ u8 rsvd2[32];
+ u8 rsvd3[32];
+ u8 rsvd4[32];
+} __packed;
+
+struct be_cmd_req_get_hsw_config {
+ struct be_cmd_req_hdr hdr;
+ u8 context[sizeof(struct amap_get_hsw_req_context) / 8];
+} __packed;
+
+struct be_cmd_resp_get_hsw_config {
+ struct be_cmd_resp_hdr hdr;
+ u8 context[sizeof(struct amap_get_hsw_resp_context) / 8];
+ u32 rsvd;
+};
+
/*************** HW Stats Get v1 **********************************/
#define BE_TXP_SW_SZ 48
struct be_port_rxf_stats_v1 {
@@ -1384,7 +1476,7 @@ struct be_port_rxf_stats_v1 {
u32 rx_in_range_errors;
u32 rx_out_range_errors;
u32 rx_frame_too_long;
- u32 rx_address_match_errors;
+ u32 rx_address_mismatch_drops;
u32 rx_dropped_too_small;
u32 rx_dropped_too_short;
u32 rx_dropped_header_too_small;
@@ -1492,8 +1584,7 @@ extern int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay);
extern int be_cmd_cq_create(struct be_adapter *adapter,
struct be_queue_info *cq, struct be_queue_info *eq,
- bool sol_evts, bool no_delay,
- int num_cqe_dma_coalesce);
+ bool no_delay, int num_cqe_dma_coalesce);
extern int be_cmd_mccq_create(struct be_adapter *adapter,
struct be_queue_info *mccq,
struct be_queue_info *cq);
@@ -1502,8 +1593,7 @@ extern int be_cmd_txq_create(struct be_adapter *adapter,
struct be_queue_info *cq);
extern int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_queue_info *rxq, u16 cq_id,
- u16 frag_size, u16 max_frame_size, u32 if_id,
- u32 rss, u8 *rss_id);
+ u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
int type);
extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
@@ -1532,7 +1622,7 @@ extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
extern int be_cmd_reset_function(struct be_adapter *adapter);
extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
u16 table_size);
-extern int be_process_mcc(struct be_adapter *adapter, int *status);
+extern int be_process_mcc(struct be_adapter *adapter);
extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
u8 port_num, u8 beacon, u8 status, u8 state);
extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
@@ -1575,7 +1665,12 @@ extern int be_cmd_req_native_mode(struct be_adapter *adapter);
extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
- u32 *pmac_id);
+ bool *pmac_id_active, u32 *pmac_id, u8 *mac);
extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
u8 mac_count, u32 domain);
+extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
+ u32 domain, u16 intf_id);
+extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
+ u32 domain, u16 intf_id);
+extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 802e5ddef8a8..c1ff73cb0e62 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -37,20 +37,46 @@ enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
FIELDINFO(struct be_drv_stats, field)
static const struct be_ethtool_stat et_stats[] = {
- {DRVSTAT_INFO(tx_events)},
{DRVSTAT_INFO(rx_crc_errors)},
{DRVSTAT_INFO(rx_alignment_symbol_errors)},
{DRVSTAT_INFO(rx_pause_frames)},
{DRVSTAT_INFO(rx_control_frames)},
+ /* Received packets dropped when the Ethernet length field
+ * is not equal to the actual Ethernet data length.
+ */
{DRVSTAT_INFO(rx_in_range_errors)},
+ /* Received packets dropped when their length field is >= 1501 bytes
+ * and <= 1535 bytes.
+ */
{DRVSTAT_INFO(rx_out_range_errors)},
+ /* Received packets dropped when they are longer than 9216 bytes */
{DRVSTAT_INFO(rx_frame_too_long)},
- {DRVSTAT_INFO(rx_address_match_errors)},
+ /* Received packets dropped when they don't pass the unicast or
+ * multicast address filtering.
+ */
+ {DRVSTAT_INFO(rx_address_mismatch_drops)},
+ /* Received packets dropped when IP packet length field is less than
+ * the IP header length field.
+ */
{DRVSTAT_INFO(rx_dropped_too_small)},
+ /* Received packets dropped when IP length field is greater than
+ * the actual packet length.
+ */
{DRVSTAT_INFO(rx_dropped_too_short)},
+ /* Received packets dropped when the IP header length field is less
+ * than 5.
+ */
{DRVSTAT_INFO(rx_dropped_header_too_small)},
+ /* Received packets dropped when the TCP header length field is less
+ * than 5 or the TCP header length + IP header length is more
+ * than IP packet length.
+ */
{DRVSTAT_INFO(rx_dropped_tcp_length)},
{DRVSTAT_INFO(rx_dropped_runt)},
+ /* Number of received packets dropped when a fifo for descriptors going
+ * into the packet demux block overflows. In normal operation, this
+ * fifo must never overflow.
+ */
{DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
{DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
{DRVSTAT_INFO(rx_ip_checksum_errs)},
@@ -59,16 +85,35 @@ static const struct be_ethtool_stat et_stats[] = {
{DRVSTAT_INFO(tx_pauseframes)},
{DRVSTAT_INFO(tx_controlframes)},
{DRVSTAT_INFO(rx_priority_pause_frames)},
+ /* Received packets dropped when an internal fifo going into
+ * main packet buffer tank (PMEM) overflows.
+ */
{DRVSTAT_INFO(pmem_fifo_overflow_drop)},
{DRVSTAT_INFO(jabber_events)},
+ /* Received packets dropped due to lack of available HW packet buffers
+ * used to temporarily hold the received packets.
+ */
{DRVSTAT_INFO(rx_drops_no_pbuf)},
- {DRVSTAT_INFO(rx_drops_no_txpb)},
+ /* Received packets dropped due to input receive buffer
+ * descriptor fifo overflowing.
+ */
{DRVSTAT_INFO(rx_drops_no_erx_descr)},
+ /* Packets dropped because the internal FIFO to the offloaded TCP
+ * receive processing block is full. This could happen only for
+ * offloaded iSCSI or FCoE traffic.
+ */
{DRVSTAT_INFO(rx_drops_no_tpre_descr)},
+ /* Received packets dropped when they need more than 8
+ * receive buffers. This cannot happen as the driver configures
+ * 2048 byte receive buffers.
+ */
{DRVSTAT_INFO(rx_drops_too_many_frags)},
- {DRVSTAT_INFO(rx_drops_invalid_ring)},
{DRVSTAT_INFO(forwarded_packets)},
+ /* Received packets dropped when the frame length
+ * is more than 9018 bytes
+ */
{DRVSTAT_INFO(rx_drops_mtu)},
+ /* Number of packets dropped due to random early drop function */
{DRVSTAT_INFO(eth_red_drops)},
{DRVSTAT_INFO(be_on_die_temperature)}
};
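Each DRVSTAT_INFO/DRVSTAT_*_INFO entry above records a statistic's name together with the size and offset of the corresponding field (via FIELDINFO), so the ethtool handlers can copy values out of the stats structures generically. Below is a minimal standalone sketch of that offset/size-table technique; the structure, field names and STAT_INFO macro are hypothetical stand-ins, not the driver's definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the driver's stats structures */
struct drv_stats {
	uint64_t rx_crc_errors;
	uint64_t rx_pause_frames;
	uint64_t rx_drops_mtu;
};

struct stat_desc {
	const char *name;
	size_t size;
	size_t offset;
};

/* Same idea as DRVSTAT_INFO/FIELDINFO: record size + offset of a field */
#define STAT_INFO(field) { #field, sizeof(((struct drv_stats *)0)->field), \
			   offsetof(struct drv_stats, field) }

static const struct stat_desc stats[] = {
	STAT_INFO(rx_crc_errors),
	STAT_INFO(rx_pause_frames),
	STAT_INFO(rx_drops_mtu),
};

int main(void)
{
	struct drv_stats s = { 3, 12, 7 };

	/* Generic copy loop, as an ethtool get_ethtool_stats handler might do */
	for (size_t i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		uint64_t val = 0;

		memcpy(&val, (uint8_t *)&s + stats[i].offset, stats[i].size);
		printf("%-20s %llu\n", stats[i].name, (unsigned long long)val);
	}
	return 0;
}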
@@ -80,12 +125,17 @@ static const struct be_ethtool_stat et_stats[] = {
static const struct be_ethtool_stat et_rx_stats[] = {
{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
- {DRVSTAT_RX_INFO(rx_polls)},
- {DRVSTAT_RX_INFO(rx_events)},
{DRVSTAT_RX_INFO(rx_compl)},
{DRVSTAT_RX_INFO(rx_mcast_pkts)},
+ /* Number of page allocation failures while posting receive buffers
+ * to HW.
+ */
{DRVSTAT_RX_INFO(rx_post_fail)},
+ /* Received packets dropped due to skb allocation failure */
{DRVSTAT_RX_INFO(rx_drops_no_skbs)},
+ /* Received packets dropped due to lack of available fetched buffers
+ * posted by the driver.
+ */
{DRVSTAT_RX_INFO(rx_drops_no_frags)}
};
#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
@@ -97,9 +147,13 @@ static const struct be_ethtool_stat et_tx_stats[] = {
{DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
{DRVSTAT_TX_INFO(tx_bytes)},
{DRVSTAT_TX_INFO(tx_pkts)},
+ /* Number of skbs queued for transmission by the driver */
{DRVSTAT_TX_INFO(tx_reqs)},
+ /* Number of TX work request blocks DMAed to HW */
{DRVSTAT_TX_INFO(tx_wrbs)},
- {DRVSTAT_TX_INFO(tx_compl)},
+ /* Number of times the TX queue was stopped due to lack
+ * of space in the TXQ.
+ */
{DRVSTAT_TX_INFO(tx_stops)}
};
#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
@@ -232,86 +286,42 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
}
}
-static int
-be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+static int be_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *et)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq;
- struct be_eq_obj *tx_eq = &adapter->tx_eq;
+ struct be_eq_obj *eqo = &adapter->eq_obj[0];
+
- coalesce->rx_coalesce_usecs = rx_eq->cur_eqd;
- coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd;
- coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd;
+ et->rx_coalesce_usecs = eqo->cur_eqd;
+ et->rx_coalesce_usecs_high = eqo->max_eqd;
+ et->rx_coalesce_usecs_low = eqo->min_eqd;
- coalesce->tx_coalesce_usecs = tx_eq->cur_eqd;
- coalesce->tx_coalesce_usecs_high = tx_eq->max_eqd;
- coalesce->tx_coalesce_usecs_low = tx_eq->min_eqd;
+ et->tx_coalesce_usecs = eqo->cur_eqd;
+ et->tx_coalesce_usecs_high = eqo->max_eqd;
+ et->tx_coalesce_usecs_low = eqo->min_eqd;
- coalesce->use_adaptive_rx_coalesce = rx_eq->enable_aic;
- coalesce->use_adaptive_tx_coalesce = tx_eq->enable_aic;
+ et->use_adaptive_rx_coalesce = eqo->enable_aic;
+ et->use_adaptive_tx_coalesce = eqo->enable_aic;
return 0;
}
-/*
- * This routine is used to set interrup coalescing delay
+/* TX attributes are ignored. Only RX attributes are considered.
+ * The eqd cmd is issued in the worker thread.
*/
-static int
-be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+static int be_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *et)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_rx_obj *rxo;
- struct be_eq_obj *rx_eq;
- struct be_eq_obj *tx_eq = &adapter->tx_eq;
- u32 rx_max, rx_min, rx_cur;
- int status = 0, i;
- u32 tx_cur;
-
- if (coalesce->use_adaptive_tx_coalesce == 1)
- return -EINVAL;
-
- for_all_rx_queues(adapter, rxo, i) {
- rx_eq = &rxo->rx_eq;
-
- if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce)
- rx_eq->cur_eqd = 0;
- rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
-
- rx_max = coalesce->rx_coalesce_usecs_high;
- rx_min = coalesce->rx_coalesce_usecs_low;
- rx_cur = coalesce->rx_coalesce_usecs;
-
- if (rx_eq->enable_aic) {
- if (rx_max > BE_MAX_EQD)
- rx_max = BE_MAX_EQD;
- if (rx_min > rx_max)
- rx_min = rx_max;
- rx_eq->max_eqd = rx_max;
- rx_eq->min_eqd = rx_min;
- if (rx_eq->cur_eqd > rx_max)
- rx_eq->cur_eqd = rx_max;
- if (rx_eq->cur_eqd < rx_min)
- rx_eq->cur_eqd = rx_min;
- } else {
- if (rx_cur > BE_MAX_EQD)
- rx_cur = BE_MAX_EQD;
- if (rx_eq->cur_eqd != rx_cur) {
- status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
- rx_cur);
- if (!status)
- rx_eq->cur_eqd = rx_cur;
- }
- }
- }
-
- tx_cur = coalesce->tx_coalesce_usecs;
-
- if (tx_cur > BE_MAX_EQD)
- tx_cur = BE_MAX_EQD;
- if (tx_eq->cur_eqd != tx_cur) {
- status = be_cmd_modify_eqd(adapter, tx_eq->q.id, tx_cur);
- if (!status)
- tx_eq->cur_eqd = tx_cur;
+ struct be_eq_obj *eqo;
+ int i;
+
+ for_all_evt_queues(adapter, eqo, i) {
+ eqo->enable_aic = et->use_adaptive_rx_coalesce;
+ eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
+ eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd);
+ eqo->eqd = et->rx_coalesce_usecs;
}
return 0;
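The reworked be_set_coalesce() above no longer issues firmware commands from the ethtool path: it only clamps and stores the requested RX values per event queue, and the MODIFY_EQD command is left to the worker thread. A standalone sketch of that clamp-and-store policy follows; MAX_EQD and the struct below are assumed stand-ins for the driver's BE_MAX_EQD and be_eq_obj.

#include <stdint.h>
#include <stdio.h>

#define MAX_EQD 120	/* assumed stand-in for the driver's BE_MAX_EQD */

struct eq_cfg {
	uint32_t min_eqd, max_eqd, eqd;
	int adaptive;
};

/* Mirrors the new be_set_coalesce() policy: only the RX values are
 * honoured, they are clamped and stored per EQ, and nothing is sent to
 * the hardware here - the worker thread issues the eqd command later. */
static void set_coalesce(struct eq_cfg *eq, uint32_t usecs,
			 uint32_t usecs_lo, uint32_t usecs_hi, int adaptive)
{
	eq->adaptive = adaptive;
	eq->max_eqd = usecs_hi < MAX_EQD ? usecs_hi : MAX_EQD;
	eq->min_eqd = usecs_lo < eq->max_eqd ? usecs_lo : eq->max_eqd;
	eq->eqd = usecs;
}

int main(void)
{
	struct eq_cfg eq = { 0 };

	set_coalesce(&eq, 64, 40, 500, 1);
	printf("eqd=%u min=%u max=%u adaptive=%d\n",
	       eq.eqd, eq.min_eqd, eq.max_eqd, eq.adaptive);
	return 0;
}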
@@ -590,26 +600,16 @@ be_set_phys_id(struct net_device *netdev,
return 0;
}
-static bool
-be_is_wol_supported(struct be_adapter *adapter)
-{
- if (!be_physfn(adapter))
- return false;
- else
- return true;
-}
static void
be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (be_is_wol_supported(adapter))
- wol->supported = WAKE_MAGIC;
-
- if (adapter->wol)
- wol->wolopts = WAKE_MAGIC;
- else
+ if (be_is_wol_supported(adapter)) {
+ wol->supported |= WAKE_MAGIC;
+ wol->wolopts |= WAKE_MAGIC;
+ } else
wol->wolopts = 0;
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
@@ -620,9 +620,14 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
struct be_adapter *adapter = netdev_priv(netdev);
if (wol->wolopts & ~WAKE_MAGIC)
- return -EINVAL;
+ return -EOPNOTSUPP;
+
+ if (!be_is_wol_supported(adapter)) {
+ dev_warn(&adapter->pdev->dev, "WOL not supported\n");
+ return -EOPNOTSUPP;
+ }
- if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
+ if (wol->wolopts & WAKE_MAGIC)
adapter->wol = true;
else
adapter->wol = false;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index e703d64434f8..528a886bc2cd 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -127,9 +127,11 @@ static inline bool be_is_mc(struct be_adapter *adapter) {
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
struct be_dma_mem *mem = &q->dma_mem;
- if (mem->va)
+ if (mem->va) {
dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
mem->dma);
+ mem->va = NULL;
+ }
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
@@ -144,7 +146,7 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
GFP_KERNEL);
if (!mem->va)
- return -1;
+ return -ENOMEM;
memset(mem->va, 0, mem->size);
return 0;
}
@@ -233,7 +235,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
struct sockaddr *addr = p;
int status = 0;
u8 current_mac[ETH_ALEN];
- u32 pmac_id = adapter->pmac_id;
+ u32 pmac_id = adapter->pmac_id[0];
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -246,7 +248,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
- adapter->if_handle, &adapter->pmac_id, 0);
+ adapter->if_handle, &adapter->pmac_id[0], 0);
if (status)
goto err;
@@ -286,7 +288,9 @@ static void populate_be2_stats(struct be_adapter *adapter)
drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
drvs->rx_dropped_header_too_small =
port_stats->rx_dropped_header_too_small;
- drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
+ drvs->rx_address_mismatch_drops =
+ port_stats->rx_address_mismatch_drops +
+ port_stats->rx_vlan_mismatch_drops;
drvs->rx_alignment_symbol_errors =
port_stats->rx_alignment_symbol_errors;
@@ -298,9 +302,7 @@ static void populate_be2_stats(struct be_adapter *adapter)
else
drvs->jabber_events = rxf_stats->port0_jabber_events;
drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
- drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
- drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
drvs->forwarded_packets = rxf_stats->forwarded_packets;
drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
@@ -337,7 +339,7 @@ static void populate_be3_stats(struct be_adapter *adapter)
port_stats->rx_dropped_header_too_small;
drvs->rx_input_fifo_overflow_drop =
port_stats->rx_input_fifo_overflow_drop;
- drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
+ drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
drvs->rx_alignment_symbol_errors =
port_stats->rx_alignment_symbol_errors;
drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
@@ -345,9 +347,7 @@ static void populate_be3_stats(struct be_adapter *adapter)
drvs->tx_controlframes = port_stats->tx_controlframes;
drvs->jabber_events = port_stats->jabber_events;
drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
- drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
- drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
drvs->forwarded_packets = rxf_stats->forwarded_packets;
drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
@@ -380,13 +380,14 @@ static void populate_lancer_stats(struct be_adapter *adapter)
drvs->rx_dropped_header_too_small =
pport_stats->rx_dropped_header_too_small;
drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
- drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
+ drvs->rx_address_mismatch_drops =
+ pport_stats->rx_address_mismatch_drops +
+ pport_stats->rx_vlan_mismatch_drops;
drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
drvs->jabber_events = pport_stats->rx_jabbers;
- drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
drvs->forwarded_packets = pport_stats->num_forwards_lo;
drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
drvs->rx_drops_too_many_frags =
@@ -884,6 +885,29 @@ static void be_set_rx_mode(struct net_device *netdev)
goto done;
}
+ if (netdev_uc_count(netdev) != adapter->uc_macs) {
+ struct netdev_hw_addr *ha;
+ int i = 1; /* First slot is claimed by the Primary MAC */
+
+ for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id[i], 0);
+ }
+
+ if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
+ be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
+ adapter->promiscuous = true;
+ goto done;
+ }
+
+ netdev_for_each_uc_addr(ha, adapter->netdev) {
+ adapter->uc_macs++; /* First slot is for Primary MAC */
+ be_cmd_pmac_add(adapter, (u8 *)ha->addr,
+ adapter->if_handle,
+ &adapter->pmac_id[adapter->uc_macs], 0);
+ }
+ }
+
be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
return;
@@ -954,14 +978,21 @@ static int be_set_vf_vlan(struct net_device *netdev,
return -EINVAL;
if (vlan) {
- adapter->vf_cfg[vf].vlan_tag = vlan;
- adapter->vlans_added++;
+ if (adapter->vf_cfg[vf].vlan_tag != vlan) {
+ /* If this is new value, program it. Else skip. */
+ adapter->vf_cfg[vf].vlan_tag = vlan;
+
+ status = be_cmd_set_hsw_config(adapter, vlan,
+ vf + 1, adapter->vf_cfg[vf].if_handle);
+ }
} else {
+ /* Reset Transparent Vlan Tagging. */
adapter->vf_cfg[vf].vlan_tag = 0;
- adapter->vlans_added--;
+ vlan = adapter->vf_cfg[vf].def_vid;
+ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
+ adapter->vf_cfg[vf].if_handle);
}
- status = be_vid_config(adapter, true, vf);
if (status)
dev_info(&adapter->pdev->dev,
@@ -997,18 +1028,24 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
return status;
}
-static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
+static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
- struct be_eq_obj *rx_eq = &rxo->rx_eq;
- struct be_rx_stats *stats = rx_stats(rxo);
+ struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
ulong now = jiffies;
ulong delta = now - stats->rx_jiffies;
u64 pkts;
unsigned int start, eqd;
- if (!rx_eq->enable_aic)
+ if (!eqo->enable_aic) {
+ eqd = eqo->eqd;
+ goto modify_eqd;
+ }
+
+ if (eqo->idx >= adapter->num_rx_qs)
return;
+ stats = rx_stats(&adapter->rx_obj[eqo->idx]);
+
/* Wrapped around */
if (time_before(now, stats->rx_jiffies)) {
stats->rx_jiffies = now;
@@ -1027,17 +1064,16 @@ static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
stats->rx_pkts_prev = pkts;
stats->rx_jiffies = now;
- eqd = stats->rx_pps / 110000;
- eqd = eqd << 3;
- if (eqd > rx_eq->max_eqd)
- eqd = rx_eq->max_eqd;
- if (eqd < rx_eq->min_eqd)
- eqd = rx_eq->min_eqd;
+ eqd = (stats->rx_pps / 110000) << 3;
+ eqd = min(eqd, eqo->max_eqd);
+ eqd = max(eqd, eqo->min_eqd);
if (eqd < 10)
eqd = 0;
- if (eqd != rx_eq->cur_eqd) {
- be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
- rx_eq->cur_eqd = eqd;
+
+modify_eqd:
+ if (eqd != eqo->cur_eqd) {
+ be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
+ eqo->cur_eqd = eqd;
}
}
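When adaptive coalescing is enabled, be_eqd_update() derives the new delay from the measured RX packet rate as (rx_pps / 110000) << 3, clamps it to [min_eqd, max_eqd] and zeroes anything below 10. The sketch below reproduces just that arithmetic, with an assumed maximum of 120 since BE_MAX_EQD's value is not visible in this hunk.

#include <stdint.h>
#include <stdio.h>

/* Adaptive interrupt-coalescing delay from the observed RX packet rate,
 * following the arithmetic in be_eqd_update(): (pps / 110000) << 3,
 * clamped to [min_eqd, max_eqd] and forced to 0 below 10 us. */
static uint32_t calc_eqd(uint64_t rx_pps, uint32_t min_eqd, uint32_t max_eqd)
{
	uint32_t eqd = (uint32_t)(rx_pps / 110000) << 3;

	if (eqd > max_eqd)
		eqd = max_eqd;
	if (eqd < min_eqd)
		eqd = min_eqd;
	if (eqd < 10)
		eqd = 0;
	return eqd;
}

int main(void)
{
	uint64_t rates[] = { 50000, 500000, 2000000, 10000000 };

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%8llu pps -> eqd %u us\n",
		       (unsigned long long)rates[i],
		       calc_eqd(rates[i], 0, 120));
	return 0;
}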
@@ -1065,11 +1101,10 @@ static inline bool csum_passed(struct be_rx_compl_info *rxcp)
(rxcp->ip_csum || rxcp->ipv6);
}
-static struct be_rx_page_info *
-get_rx_page_info(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- u16 frag_idx)
+static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
+ u16 frag_idx)
{
+ struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *rx_page_info;
struct be_queue_info *rxq = &rxo->q;
@@ -1088,16 +1123,15 @@ get_rx_page_info(struct be_adapter *adapter,
}
/* Throw away the data in the Rx completion */
-static void be_rx_compl_discard(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- struct be_rx_compl_info *rxcp)
+static void be_rx_compl_discard(struct be_rx_obj *rxo,
+ struct be_rx_compl_info *rxcp)
{
struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info;
u16 i, num_rcvd = rxcp->num_rcvd;
for (i = 0; i < num_rcvd; i++) {
- page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
index_inc(&rxcp->rxq_idx, rxq->len);
@@ -1108,8 +1142,8 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
* skb_fill_rx_data forms a complete skb for an ether frame
* indicated by rxcp.
*/
-static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
- struct sk_buff *skb, struct be_rx_compl_info *rxcp)
+static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
+ struct be_rx_compl_info *rxcp)
{
struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info;
@@ -1117,7 +1151,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
u16 hdr_len, curr_frag_len, remaining;
u8 *start;
- page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
start = page_address(page_info->page) + page_info->page_offset;
prefetch(start);
@@ -1154,7 +1188,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
index_inc(&rxcp->rxq_idx, rxq->len);
remaining = rxcp->pkt_size - curr_frag_len;
for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
- page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
curr_frag_len = min(remaining, rx_frag_size);
/* Coalesce all frags from the same physical page in one slot */
@@ -1182,21 +1216,21 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
-static void be_rx_compl_process(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- struct be_rx_compl_info *rxcp)
+static void be_rx_compl_process(struct be_rx_obj *rxo,
+ struct be_rx_compl_info *rxcp)
{
+ struct be_adapter *adapter = rxo->adapter;
struct net_device *netdev = adapter->netdev;
struct sk_buff *skb;
- skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
+ skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
if (unlikely(!skb)) {
rx_stats(rxo)->rx_drops_no_skbs++;
- be_rx_compl_discard(adapter, rxo, rxcp);
+ be_rx_compl_discard(rxo, rxcp);
return;
}
- skb_fill_rx_data(adapter, rxo, skb, rxcp);
+ skb_fill_rx_data(rxo, skb, rxcp);
if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1204,7 +1238,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- if (adapter->netdev->features & NETIF_F_RXHASH)
+ if (netdev->features & NETIF_F_RXHASH)
skb->rxhash = rxcp->rss_hash;
@@ -1215,26 +1249,25 @@ static void be_rx_compl_process(struct be_adapter *adapter,
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
-static void be_rx_compl_process_gro(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- struct be_rx_compl_info *rxcp)
+void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
+ struct be_rx_compl_info *rxcp)
{
+ struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *page_info;
struct sk_buff *skb = NULL;
struct be_queue_info *rxq = &rxo->q;
- struct be_eq_obj *eq_obj = &rxo->rx_eq;
u16 remaining, curr_frag_len;
u16 i, j;
- skb = napi_get_frags(&eq_obj->napi);
+ skb = napi_get_frags(napi);
if (!skb) {
- be_rx_compl_discard(adapter, rxo, rxcp);
+ be_rx_compl_discard(rxo, rxcp);
return;
}
remaining = rxcp->pkt_size;
for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
- page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
curr_frag_len = min(remaining, rx_frag_size);
@@ -1267,12 +1300,11 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
if (rxcp->vlanf)
__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
- napi_gro_frags(&eq_obj->napi);
+ napi_gro_frags(napi);
}
-static void be_parse_rx_compl_v1(struct be_adapter *adapter,
- struct be_eth_rx_compl *compl,
- struct be_rx_compl_info *rxcp)
+static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
+ struct be_rx_compl_info *rxcp)
{
rxcp->pkt_size =
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
@@ -1303,9 +1335,8 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter,
rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
-static void be_parse_rx_compl_v0(struct be_adapter *adapter,
- struct be_eth_rx_compl *compl,
- struct be_rx_compl_info *rxcp)
+static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
+ struct be_rx_compl_info *rxcp)
{
rxcp->pkt_size =
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
@@ -1351,9 +1382,9 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
be_dws_le_to_cpu(compl, sizeof(*compl));
if (adapter->be3_native)
- be_parse_rx_compl_v1(adapter, compl, rxcp);
+ be_parse_rx_compl_v1(compl, rxcp);
else
- be_parse_rx_compl_v0(adapter, compl, rxcp);
+ be_parse_rx_compl_v0(compl, rxcp);
if (rxcp->vlanf) {
/* vlanf could be wrongly set in some cards.
@@ -1392,7 +1423,6 @@ static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
struct be_adapter *adapter = rxo->adapter;
- struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
struct be_queue_info *rxq = &rxo->q;
struct page *pagep = NULL;
@@ -1434,7 +1464,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
prev_page_info = page_info;
queue_head_inc(rxq);
- page_info = &page_info_tbl[rxq->head];
+ page_info = &rxo->page_info_tbl[rxq->head];
}
if (pagep)
prev_page_info->last_page_user = true;
@@ -1496,62 +1526,51 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
return num_wrbs;
}
-static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
+/* Return the number of events in the event queue */
+static inline int events_get(struct be_eq_obj *eqo)
{
- struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
+ struct be_eq_entry *eqe;
+ int num = 0;
- if (!eqe->evt)
- return NULL;
+ do {
+ eqe = queue_tail_node(&eqo->q);
+ if (eqe->evt == 0)
+ break;
- rmb();
- eqe->evt = le32_to_cpu(eqe->evt);
- queue_tail_inc(&eq_obj->q);
- return eqe;
+ rmb();
+ eqe->evt = 0;
+ num++;
+ queue_tail_inc(&eqo->q);
+ } while (true);
+
+ return num;
}
-static int event_handle(struct be_adapter *adapter,
- struct be_eq_obj *eq_obj,
- bool rearm)
+static int event_handle(struct be_eq_obj *eqo)
{
- struct be_eq_entry *eqe;
- u16 num = 0;
+ bool rearm = false;
+ int num = events_get(eqo);
- while ((eqe = event_get(eq_obj)) != NULL) {
- eqe->evt = 0;
- num++;
- }
-
- /* Deal with any spurious interrupts that come
- * without events
- */
+ /* Deal with any spurious interrupts that come without events */
if (!num)
rearm = true;
- be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
+ be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
if (num)
- napi_schedule(&eq_obj->napi);
+ napi_schedule(&eqo->napi);
return num;
}
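events_get() walks the event queue from its tail, clearing and counting every valid entry, and event_handle() then acks that count, re-arming immediately only when the interrupt was spurious (zero events) and otherwise scheduling NAPI. A toy ring-buffer version of the same consume-and-rearm decision, with the hardware access and memory-barrier details left out:

#include <stdio.h>

#define EQ_LEN 8

/* Toy event queue: a non-zero slot means the hardware posted an event. */
struct eq {
	unsigned int evt[EQ_LEN];
	unsigned int tail;
};

/* Count and clear valid entries starting at the tail, as events_get()
 * does (the real code also issues rmb() before trusting an entry). */
static int events_get(struct eq *q)
{
	int num = 0;

	while (q->evt[q->tail] != 0) {
		q->evt[q->tail] = 0;
		q->tail = (q->tail + 1) % EQ_LEN;
		num++;
	}
	return num;
}

int main(void)
{
	struct eq q = { .evt = { 1, 1, 1, 0 }, .tail = 0 };
	int num = events_get(&q);

	/* event_handle(): rearm only when the interrupt was spurious,
	 * otherwise ack the events and schedule NAPI. */
	int rearm = (num == 0);

	printf("consumed %d events, rearm=%d\n", num, rearm);
	return 0;
}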
-/* Just read and notify events without processing them.
- * Used at the time of destroying event queues */
-static void be_eq_clean(struct be_adapter *adapter,
- struct be_eq_obj *eq_obj)
+/* Leaves the EQ in disarmed state */
+static void be_eq_clean(struct be_eq_obj *eqo)
{
- struct be_eq_entry *eqe;
- u16 num = 0;
+ int num = events_get(eqo);
- while ((eqe = event_get(eq_obj)) != NULL) {
- eqe->evt = 0;
- num++;
- }
-
- if (num)
- be_eq_notify(adapter, eq_obj->q.id, false, true, num);
+ be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}
-static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
+static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
struct be_rx_page_info *page_info;
struct be_queue_info *rxq = &rxo->q;
@@ -1561,14 +1580,14 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
/* First cleanup pending rx completions */
while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
- be_rx_compl_discard(adapter, rxo, rxcp);
- be_cq_notify(adapter, rx_cq->id, false, 1);
+ be_rx_compl_discard(rxo, rxcp);
+ be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
}
/* Then free posted rx buffers that were not used */
tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
- page_info = get_rx_page_info(adapter, rxo, tail);
+ page_info = get_rx_page_info(rxo, tail);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
}
@@ -1576,52 +1595,104 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
rxq->tail = rxq->head = 0;
}
-static void be_tx_compl_clean(struct be_adapter *adapter,
- struct be_tx_obj *txo)
+static void be_tx_compl_clean(struct be_adapter *adapter)
{
- struct be_queue_info *tx_cq = &txo->cq;
- struct be_queue_info *txq = &txo->q;
+ struct be_tx_obj *txo;
+ struct be_queue_info *txq;
struct be_eth_tx_compl *txcp;
u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
- struct sk_buff **sent_skbs = txo->sent_skb_list;
struct sk_buff *sent_skb;
bool dummy_wrb;
+ int i, pending_txqs;
/* Wait for a max of 200ms for all the tx-completions to arrive. */
do {
- while ((txcp = be_tx_compl_get(tx_cq))) {
- end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
- wrb_index, txcp);
- num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
- cmpl++;
- }
- if (cmpl) {
- be_cq_notify(adapter, tx_cq->id, false, cmpl);
- atomic_sub(num_wrbs, &txq->used);
- cmpl = 0;
- num_wrbs = 0;
+ pending_txqs = adapter->num_tx_qs;
+
+ for_all_tx_queues(adapter, txo, i) {
+ txq = &txo->q;
+ while ((txcp = be_tx_compl_get(&txo->cq))) {
+ end_idx =
+ AMAP_GET_BITS(struct amap_eth_tx_compl,
+ wrb_index, txcp);
+ num_wrbs += be_tx_compl_process(adapter, txo,
+ end_idx);
+ cmpl++;
+ }
+ if (cmpl) {
+ be_cq_notify(adapter, txo->cq.id, false, cmpl);
+ atomic_sub(num_wrbs, &txq->used);
+ cmpl = 0;
+ num_wrbs = 0;
+ }
+ if (atomic_read(&txq->used) == 0)
+ pending_txqs--;
}
- if (atomic_read(&txq->used) == 0 || ++timeo > 200)
+ if (pending_txqs == 0 || ++timeo > 200)
break;
mdelay(1);
} while (true);
- if (atomic_read(&txq->used))
- dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
- atomic_read(&txq->used));
+ for_all_tx_queues(adapter, txo, i) {
+ txq = &txo->q;
+ if (atomic_read(&txq->used))
+ dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
+ atomic_read(&txq->used));
+
+ /* free posted tx for which compls will never arrive */
+ while (atomic_read(&txq->used)) {
+ sent_skb = txo->sent_skb_list[txq->tail];
+ end_idx = txq->tail;
+ num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
+ &dummy_wrb);
+ index_adv(&end_idx, num_wrbs - 1, txq->len);
+ num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
+ atomic_sub(num_wrbs, &txq->used);
+ }
+ }
+}
+
+static void be_evt_queues_destroy(struct be_adapter *adapter)
+{
+ struct be_eq_obj *eqo;
+ int i;
+
+ for_all_evt_queues(adapter, eqo, i) {
+ be_eq_clean(eqo);
+ if (eqo->q.created)
+ be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
+ be_queue_free(adapter, &eqo->q);
+ }
+}
+
+static int be_evt_queues_create(struct be_adapter *adapter)
+{
+ struct be_queue_info *eq;
+ struct be_eq_obj *eqo;
+ int i, rc;
+
+ adapter->num_evt_qs = num_irqs(adapter);
+
+ for_all_evt_queues(adapter, eqo, i) {
+ eqo->adapter = adapter;
+ eqo->tx_budget = BE_TX_BUDGET;
+ eqo->idx = i;
+ eqo->max_eqd = BE_MAX_EQD;
+ eqo->enable_aic = true;
+
+ eq = &eqo->q;
+ rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
+ sizeof(struct be_eq_entry));
+ if (rc)
+ return rc;
- /* free posted tx for which compls will never arrive */
- while (atomic_read(&txq->used)) {
- sent_skb = sent_skbs[txq->tail];
- end_idx = txq->tail;
- index_adv(&end_idx,
- wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
- txq->len);
- num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
- atomic_sub(num_wrbs, &txq->used);
+ rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
+ if (rc)
+ return rc;
}
+ return 0;
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
@@ -1644,22 +1715,19 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
{
struct be_queue_info *q, *cq;
- /* Alloc MCC compl queue */
cq = &adapter->mcc_obj.cq;
if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
sizeof(struct be_mcc_compl)))
goto err;
- /* Ask BE to create MCC compl queue; share TX's eq */
- if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
+ /* Use the default EQ for MCC completions */
+ if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
goto mcc_cq_free;
- /* Alloc MCC queue */
q = &adapter->mcc_obj.q;
if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
goto mcc_cq_destroy;
- /* Ask BE to create MCC queue */
if (be_cmd_mccq_create(adapter, q, cq))
goto mcc_q_free;
@@ -1692,14 +1760,6 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
be_cmd_q_destroy(adapter, q, QTYPE_CQ);
be_queue_free(adapter, q);
}
-
- /* Clear any residual events */
- be_eq_clean(adapter, &adapter->tx_eq);
-
- q = &adapter->tx_eq.q;
- if (q->created)
- be_cmd_q_destroy(adapter, q, QTYPE_EQ);
- be_queue_free(adapter, q);
}
static int be_num_txqs_want(struct be_adapter *adapter)
@@ -1712,10 +1772,10 @@ static int be_num_txqs_want(struct be_adapter *adapter)
return MAX_TX_QS;
}
-/* One TX event queue is shared by all TX compl qs */
-static int be_tx_queues_create(struct be_adapter *adapter)
+static int be_tx_cqs_create(struct be_adapter *adapter)
{
- struct be_queue_info *eq, *q, *cq;
+ struct be_queue_info *cq, *eq;
+ int status;
struct be_tx_obj *txo;
u8 i;
@@ -1727,192 +1787,109 @@ static int be_tx_queues_create(struct be_adapter *adapter)
rtnl_unlock();
}
- adapter->tx_eq.max_eqd = 0;
- adapter->tx_eq.min_eqd = 0;
- adapter->tx_eq.cur_eqd = 96;
- adapter->tx_eq.enable_aic = false;
+ for_all_tx_queues(adapter, txo, i) {
+ cq = &txo->cq;
+ status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
+ sizeof(struct be_eth_tx_compl));
+ if (status)
+ return status;
- eq = &adapter->tx_eq.q;
- if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
- sizeof(struct be_eq_entry)))
- return -1;
+ /* If num_evt_qs is less than num_tx_qs, then more than
+ * one txq shares an eq
+ */
+ eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
+ status = be_cmd_cq_create(adapter, cq, eq, false, 3);
+ if (status)
+ return status;
+ }
+ return 0;
+}
- if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
- goto err;
- adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
+static int be_tx_qs_create(struct be_adapter *adapter)
+{
+ struct be_tx_obj *txo;
+ int i, status;
for_all_tx_queues(adapter, txo, i) {
- cq = &txo->cq;
- if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
- sizeof(struct be_eth_tx_compl)))
- goto err;
-
- if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
- goto err;
+ status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
+ sizeof(struct be_eth_wrb));
+ if (status)
+ return status;
- q = &txo->q;
- if (be_queue_alloc(adapter, q, TX_Q_LEN,
- sizeof(struct be_eth_wrb)))
- goto err;
+ status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
+ if (status)
+ return status;
}
- return 0;
-err:
- be_tx_queues_destroy(adapter);
- return -1;
+ return 0;
}
-static void be_rx_queues_destroy(struct be_adapter *adapter)
+static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
struct be_queue_info *q;
struct be_rx_obj *rxo;
int i;
for_all_rx_queues(adapter, rxo, i) {
- be_queue_free(adapter, &rxo->q);
-
q = &rxo->cq;
if (q->created)
be_cmd_q_destroy(adapter, q, QTYPE_CQ);
be_queue_free(adapter, q);
-
- q = &rxo->rx_eq.q;
- if (q->created)
- be_cmd_q_destroy(adapter, q, QTYPE_EQ);
- be_queue_free(adapter, q);
}
}
-static u32 be_num_rxqs_want(struct be_adapter *adapter)
+static int be_rx_cqs_create(struct be_adapter *adapter)
{
- if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
- !sriov_enabled(adapter) && be_physfn(adapter)) {
- return 1 + MAX_RSS_QS; /* one default non-RSS queue */
- } else {
- dev_warn(&adapter->pdev->dev,
- "No support for multiple RX queues\n");
- return 1;
- }
-}
-
-static int be_rx_queues_create(struct be_adapter *adapter)
-{
- struct be_queue_info *eq, *q, *cq;
+ struct be_queue_info *eq, *cq;
struct be_rx_obj *rxo;
int rc, i;
- adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
- msix_enabled(adapter) ?
- adapter->num_msix_vec - 1 : 1);
- if (adapter->num_rx_qs != MAX_RX_QS)
- dev_warn(&adapter->pdev->dev,
- "Can create only %d RX queues", adapter->num_rx_qs);
+ /* We'll create as many RSS rings as there are irqs.
+ * But when there's only one irq there's no use creating RSS rings
+ */
+ adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
+ num_irqs(adapter) + 1 : 1;
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
for_all_rx_queues(adapter, rxo, i) {
rxo->adapter = adapter;
- rxo->rx_eq.max_eqd = BE_MAX_EQD;
- rxo->rx_eq.enable_aic = true;
-
- /* EQ */
- eq = &rxo->rx_eq.q;
- rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
- sizeof(struct be_eq_entry));
- if (rc)
- goto err;
-
- rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
- if (rc)
- goto err;
-
- rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
-
- /* CQ */
cq = &rxo->cq;
rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
sizeof(struct be_eth_rx_compl));
if (rc)
- goto err;
-
- rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
- if (rc)
- goto err;
+ return rc;
- /* Rx Q - will be created in be_open() */
- q = &rxo->q;
- rc = be_queue_alloc(adapter, q, RX_Q_LEN,
- sizeof(struct be_eth_rx_d));
+ eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
+ rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
if (rc)
- goto err;
-
+ return rc;
}
- return 0;
-err:
- be_rx_queues_destroy(adapter);
- return -1;
-}
+ if (adapter->num_rx_qs != MAX_RX_QS)
+ dev_info(&adapter->pdev->dev,
+ "Created only %d receive queues", adapter->num_rx_qs);
-static bool event_peek(struct be_eq_obj *eq_obj)
-{
- struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
- if (!eqe->evt)
- return false;
- else
- return true;
+ return 0;
}
static irqreturn_t be_intx(int irq, void *dev)
{
struct be_adapter *adapter = dev;
- struct be_rx_obj *rxo;
- int isr, i, tx = 0 , rx = 0;
-
- if (lancer_chip(adapter)) {
- if (event_peek(&adapter->tx_eq))
- tx = event_handle(adapter, &adapter->tx_eq, false);
- for_all_rx_queues(adapter, rxo, i) {
- if (event_peek(&rxo->rx_eq))
- rx |= event_handle(adapter, &rxo->rx_eq, true);
- }
-
- if (!(tx || rx))
- return IRQ_NONE;
-
- } else {
- isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
- (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
- if (!isr)
- return IRQ_NONE;
-
- if ((1 << adapter->tx_eq.eq_idx & isr))
- event_handle(adapter, &adapter->tx_eq, false);
-
- for_all_rx_queues(adapter, rxo, i) {
- if ((1 << rxo->rx_eq.eq_idx & isr))
- event_handle(adapter, &rxo->rx_eq, true);
- }
- }
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t be_msix_rx(int irq, void *dev)
-{
- struct be_rx_obj *rxo = dev;
- struct be_adapter *adapter = rxo->adapter;
-
- event_handle(adapter, &rxo->rx_eq, true);
+ int num_evts;
- return IRQ_HANDLED;
+ /* With INTx only one EQ is used */
+ num_evts = event_handle(&adapter->eq_obj[0]);
+ if (num_evts)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
}
-static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
+static irqreturn_t be_msix(int irq, void *dev)
{
- struct be_adapter *adapter = dev;
-
- event_handle(adapter, &adapter->tx_eq, false);
+ struct be_eq_obj *eqo = dev;
+ event_handle(eqo);
return IRQ_HANDLED;
}
@@ -1921,16 +1898,14 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
return (rxcp->tcpf && !rxcp->err) ? true : false;
}
-static int be_poll_rx(struct napi_struct *napi, int budget)
+static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
+ int budget)
{
- struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
- struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
struct be_adapter *adapter = rxo->adapter;
struct be_queue_info *rx_cq = &rxo->cq;
struct be_rx_compl_info *rxcp;
u32 work_done;
- rx_stats(rxo)->rx_polls++;
for (work_done = 0; work_done < budget; work_done++) {
rxcp = be_rx_compl_get(rxo);
if (!rxcp)
@@ -1942,7 +1917,7 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
/* Discard compl with partial DMA Lancer B0 */
if (unlikely(!rxcp->pkt_size)) {
- be_rx_compl_discard(adapter, rxo, rxcp);
+ be_rx_compl_discard(rxo, rxcp);
goto loop_continue;
}
@@ -1951,94 +1926,96 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
*/
if (unlikely(rxcp->port != adapter->port_num &&
!lancer_chip(adapter))) {
- be_rx_compl_discard(adapter, rxo, rxcp);
+ be_rx_compl_discard(rxo, rxcp);
goto loop_continue;
}
if (do_gro(rxcp))
- be_rx_compl_process_gro(adapter, rxo, rxcp);
+ be_rx_compl_process_gro(rxo, napi, rxcp);
else
- be_rx_compl_process(adapter, rxo, rxcp);
+ be_rx_compl_process(rxo, rxcp);
loop_continue:
be_rx_stats_update(rxo, rxcp);
}
- be_cq_notify(adapter, rx_cq->id, false, work_done);
+ if (work_done) {
+ be_cq_notify(adapter, rx_cq->id, true, work_done);
- /* Refill the queue */
- if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
- be_post_rx_frags(rxo, GFP_ATOMIC);
-
- /* All consumed */
- if (work_done < budget) {
- napi_complete(napi);
- /* Arm CQ */
- be_cq_notify(adapter, rx_cq->id, true, 0);
+ if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
+ be_post_rx_frags(rxo, GFP_ATOMIC);
}
+
return work_done;
}
-/* As TX and MCC share the same EQ check for both TX and MCC completions.
- * For TX/MCC we don't honour budget; consume everything
- */
-static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
+static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
+ int budget, int idx)
{
- struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
- struct be_adapter *adapter =
- container_of(tx_eq, struct be_adapter, tx_eq);
- struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- struct be_tx_obj *txo;
struct be_eth_tx_compl *txcp;
- int tx_compl, mcc_compl, status = 0;
- u8 i;
- u16 num_wrbs;
+ int num_wrbs = 0, work_done;
- for_all_tx_queues(adapter, txo, i) {
- tx_compl = 0;
- num_wrbs = 0;
- while ((txcp = be_tx_compl_get(&txo->cq))) {
- num_wrbs += be_tx_compl_process(adapter, txo,
+ for (work_done = 0; work_done < budget; work_done++) {
+ txcp = be_tx_compl_get(&txo->cq);
+ if (!txcp)
+ break;
+ num_wrbs += be_tx_compl_process(adapter, txo,
AMAP_GET_BITS(struct amap_eth_tx_compl,
wrb_index, txcp));
- tx_compl++;
- }
- if (tx_compl) {
- be_cq_notify(adapter, txo->cq.id, true, tx_compl);
-
- atomic_sub(num_wrbs, &txo->q.used);
+ }
- /* As Tx wrbs have been freed up, wake up netdev queue
- * if it was stopped due to lack of tx wrbs. */
- if (__netif_subqueue_stopped(adapter->netdev, i) &&
- atomic_read(&txo->q.used) < txo->q.len / 2) {
- netif_wake_subqueue(adapter->netdev, i);
- }
+ if (work_done) {
+ be_cq_notify(adapter, txo->cq.id, true, work_done);
+ atomic_sub(num_wrbs, &txo->q.used);
- u64_stats_update_begin(&tx_stats(txo)->sync_compl);
- tx_stats(txo)->tx_compl += tx_compl;
- u64_stats_update_end(&tx_stats(txo)->sync_compl);
+ /* As Tx wrbs have been freed up, wake up netdev queue
+ * if it was stopped due to lack of tx wrbs. */
+ if (__netif_subqueue_stopped(adapter->netdev, idx) &&
+ atomic_read(&txo->q.used) < txo->q.len / 2) {
+ netif_wake_subqueue(adapter->netdev, idx);
}
+
+ u64_stats_update_begin(&tx_stats(txo)->sync_compl);
+ tx_stats(txo)->tx_compl += work_done;
+ u64_stats_update_end(&tx_stats(txo)->sync_compl);
}
+ return (work_done < budget); /* Done */
+}
- mcc_compl = be_process_mcc(adapter, &status);
+int be_poll(struct napi_struct *napi, int budget)
+{
+ struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
+ struct be_adapter *adapter = eqo->adapter;
+ int max_work = 0, work, i;
+ bool tx_done;
- if (mcc_compl) {
- be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
+ /* Process all TXQs serviced by this EQ */
+ for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
+ tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
+ eqo->tx_budget, i);
+ if (!tx_done)
+ max_work = budget;
}
- napi_complete(napi);
+ /* This loop will iterate twice for EQ0 in which
+ * completions of the last RXQ (default one) are also processed.
+ * For other EQs the loop iterates only once.
+ */
+ for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
+ work = be_process_rx(&adapter->rx_obj[i], napi, budget);
+ max_work = max(work, max_work);
+ }
- /* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
- if (lancer_chip(adapter) && !msix_enabled(adapter)) {
- for_all_tx_queues(adapter, txo, i)
- be_cq_notify(adapter, txo->cq.id, true, 0);
+ if (is_mcc_eqo(eqo))
+ be_process_mcc(adapter);
- be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
+ if (max_work < budget) {
+ napi_complete(napi);
+ be_eq_notify(adapter, eqo->q.id, true, false, 0);
+ } else {
+ /* As we'll continue in polling mode, count and clear events */
+ be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
}
-
- be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
- adapter->drv_stats.tx_events++;
- return 1;
+ return max_work;
}
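be_poll() now drives all TX and RX queues hanging off one EQ from a single NAPI context: each RX queue may use the full budget, an undrained TX queue forces the whole poll to report the budget as consumed, and napi_complete() plus EQ re-arm happen only when the total work stays under budget. The following simplified model shows only that accounting; the queue structures and notify calls are omitted.

#include <stdio.h>

/* Skeleton of the be_poll() budget policy: if any TX queue could not be
 * drained within its own budget, report the full budget so NAPI keeps
 * polling; otherwise report the largest RX work done, and let the caller
 * complete NAPI (and re-arm the EQ) when that stays under budget. */
static int poll(int budget, int tx_done, const int *rx_work, int nr_rxqs,
		int *completed)
{
	int max_work = tx_done ? 0 : budget;

	for (int i = 0; i < nr_rxqs; i++)
		if (rx_work[i] > max_work)
			max_work = rx_work[i];

	*completed = (max_work < budget);	/* napi_complete() + EQ rearm */
	return max_work;
}

int main(void)
{
	int rx_work[2] = { 17, 64 };
	int done;
	int ret = poll(64, 1, rx_work, 2, &done);

	printf("returned %d, napi_complete=%d\n", ret, done);
	return 0;
}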
void be_detect_dump_ue(struct be_adapter *adapter)
@@ -2113,12 +2090,24 @@ static void be_msix_disable(struct be_adapter *adapter)
}
}
+static uint be_num_rss_want(struct be_adapter *adapter)
+{
+ if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
+ adapter->num_vfs == 0 && be_physfn(adapter) &&
+ !be_is_mc(adapter))
+ return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+ else
+ return 0;
+}
+
static void be_msix_enable(struct be_adapter *adapter)
{
-#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
+#define BE_MIN_MSIX_VECTORS 1
int i, status, num_vec;
- num_vec = be_num_rxqs_want(adapter) + 1;
+ /* If RSS queues are not used, need a vec for default RX Q */
+ num_vec = min(be_num_rss_want(adapter), num_online_cpus());
+ num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
for (i = 0; i < num_vec; i++)
adapter->msix_entries[i].entry = i;
@@ -2186,60 +2175,31 @@ static void be_sriov_disable(struct be_adapter *adapter)
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
- struct be_eq_obj *eq_obj)
+ struct be_eq_obj *eqo)
{
- return adapter->msix_entries[eq_obj->eq_idx].vector;
-}
-
-static int be_request_irq(struct be_adapter *adapter,
- struct be_eq_obj *eq_obj,
- void *handler, char *desc, void *context)
-{
- struct net_device *netdev = adapter->netdev;
- int vec;
-
- sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
- vec = be_msix_vec_get(adapter, eq_obj);
- return request_irq(vec, handler, 0, eq_obj->desc, context);
-}
-
-static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
- void *context)
-{
- int vec = be_msix_vec_get(adapter, eq_obj);
- free_irq(vec, context);
+ return adapter->msix_entries[eqo->idx].vector;
}
static int be_msix_register(struct be_adapter *adapter)
{
- struct be_rx_obj *rxo;
- int status, i;
- char qname[10];
-
- status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
- adapter);
- if (status)
- goto err;
+ struct net_device *netdev = adapter->netdev;
+ struct be_eq_obj *eqo;
+ int status, i, vec;
- for_all_rx_queues(adapter, rxo, i) {
- sprintf(qname, "rxq%d", i);
- status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
- qname, rxo);
+ for_all_evt_queues(adapter, eqo, i) {
+ sprintf(eqo->desc, "%s-q%d", netdev->name, i);
+ vec = be_msix_vec_get(adapter, eqo);
+ status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
if (status)
goto err_msix;
}
return 0;
-
err_msix:
- be_free_irq(adapter, &adapter->tx_eq, adapter);
-
- for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
- be_free_irq(adapter, &rxo->rx_eq, rxo);
-
-err:
- dev_warn(&adapter->pdev->dev,
- "MSIX Request IRQ failed - err %d\n", status);
+ for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
+ free_irq(be_msix_vec_get(adapter, eqo), eqo);
+ dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
+ status);
be_msix_disable(adapter);
return status;
}
@@ -2275,7 +2235,7 @@ done:
static void be_irq_unregister(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct be_rx_obj *rxo;
+ struct be_eq_obj *eqo;
int i;
if (!adapter->isr_registered)
@@ -2288,16 +2248,14 @@ static void be_irq_unregister(struct be_adapter *adapter)
}
/* MSIx */
- be_free_irq(adapter, &adapter->tx_eq, adapter);
-
- for_all_rx_queues(adapter, rxo, i)
- be_free_irq(adapter, &rxo->rx_eq, rxo);
+ for_all_evt_queues(adapter, eqo, i)
+ free_irq(be_msix_vec_get(adapter, eqo), eqo);
done:
adapter->isr_registered = false;
}
-static void be_rx_queues_clear(struct be_adapter *adapter)
+static void be_rx_qs_destroy(struct be_adapter *adapter)
{
struct be_queue_info *q;
struct be_rx_obj *rxo;
@@ -2312,76 +2270,67 @@ static void be_rx_queues_clear(struct be_adapter *adapter)
* arrive
*/
mdelay(1);
- be_rx_q_clean(adapter, rxo);
+ be_rx_cq_clean(rxo);
}
-
- /* Clear any residual events */
- q = &rxo->rx_eq.q;
- if (q->created)
- be_eq_clean(adapter, &rxo->rx_eq);
+ be_queue_free(adapter, q);
}
}
static int be_close(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_rx_obj *rxo;
- struct be_tx_obj *txo;
- struct be_eq_obj *tx_eq = &adapter->tx_eq;
- int vec, i;
+ struct be_eq_obj *eqo;
+ int i;
be_async_mcc_disable(adapter);
if (!lancer_chip(adapter))
be_intr_set(adapter, false);
- for_all_rx_queues(adapter, rxo, i)
- napi_disable(&rxo->rx_eq.napi);
-
- napi_disable(&tx_eq->napi);
-
- if (lancer_chip(adapter)) {
- be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
- for_all_rx_queues(adapter, rxo, i)
- be_cq_notify(adapter, rxo->cq.id, false, 0);
- for_all_tx_queues(adapter, txo, i)
- be_cq_notify(adapter, txo->cq.id, false, 0);
+ for_all_evt_queues(adapter, eqo, i) {
+ napi_disable(&eqo->napi);
+ if (msix_enabled(adapter))
+ synchronize_irq(be_msix_vec_get(adapter, eqo));
+ else
+ synchronize_irq(netdev->irq);
+ be_eq_clean(eqo);
}
- if (msix_enabled(adapter)) {
- vec = be_msix_vec_get(adapter, tx_eq);
- synchronize_irq(vec);
-
- for_all_rx_queues(adapter, rxo, i) {
- vec = be_msix_vec_get(adapter, &rxo->rx_eq);
- synchronize_irq(vec);
- }
- } else {
- synchronize_irq(netdev->irq);
- }
be_irq_unregister(adapter);
/* Wait for all pending tx completions to arrive so that
* all tx skbs are freed.
*/
- for_all_tx_queues(adapter, txo, i)
- be_tx_compl_clean(adapter, txo);
+ be_tx_compl_clean(adapter);
- be_rx_queues_clear(adapter);
+ be_rx_qs_destroy(adapter);
return 0;
}
-static int be_rx_queues_setup(struct be_adapter *adapter)
+static int be_rx_qs_create(struct be_adapter *adapter)
{
struct be_rx_obj *rxo;
int rc, i, j;
u8 rsstable[128];
for_all_rx_queues(adapter, rxo, i) {
+ rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
+ sizeof(struct be_eth_rx_d));
+ if (rc)
+ return rc;
+ }
+
+ /* The FW would like the default RXQ to be created first */
+ rxo = default_rxo(adapter);
+ rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
+ adapter->if_handle, false, &rxo->rss_id);
+ if (rc)
+ return rc;
+
+ for_all_rss_queues(adapter, rxo, i) {
rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
- rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
- adapter->if_handle,
- (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
+ rx_frag_size, adapter->if_handle,
+ true, &rxo->rss_id);
if (rc)
return rc;
}
@@ -2395,48 +2344,47 @@ static int be_rx_queues_setup(struct be_adapter *adapter)
}
}
rc = be_cmd_rss_config(adapter, rsstable, 128);
-
if (rc)
return rc;
}
/* First time posting */
- for_all_rx_queues(adapter, rxo, i) {
+ for_all_rx_queues(adapter, rxo, i)
be_post_rx_frags(rxo, GFP_KERNEL);
- napi_enable(&rxo->rx_eq.napi);
- }
return 0;
}
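be_rx_qs_create() creates the default (non-RSS) ring first, then the RSS rings, and finally programs a 128-entry RSS indirection table through be_cmd_rss_config(). The loop that fills rsstable[] is not visible in this hunk, so the round-robin spread below is an assumption used purely for illustration, with made-up ring ids.

#include <stdio.h>

#define RSS_TABLE_LEN 128

/* Hypothetical round-robin fill of a 128-entry RSS indirection table;
 * the driver's actual fill loop sits outside this hunk, so treat the
 * spreading policy here as an assumption, not the patch's code. */
static void fill_rsstable(unsigned char *table, const unsigned char *rss_ids,
			  int nr_rss_qs)
{
	for (int i = 0; i < RSS_TABLE_LEN; i++)
		table[i] = rss_ids[i % nr_rss_qs];
}

int main(void)
{
	unsigned char ids[4] = { 5, 6, 7, 8 };	/* made-up rss_id values */
	unsigned char table[RSS_TABLE_LEN];

	fill_rsstable(table, ids, 4);
	printf("entry 0=%u entry 1=%u entry 127=%u\n",
	       table[0], table[1], table[127]);
	return 0;
}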
static int be_open(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *tx_eq = &adapter->tx_eq;
+ struct be_eq_obj *eqo;
struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
u8 link_status;
int status, i;
- status = be_rx_queues_setup(adapter);
+ status = be_rx_qs_create(adapter);
if (status)
goto err;
- napi_enable(&tx_eq->napi);
-
be_irq_register(adapter);
if (!lancer_chip(adapter))
be_intr_set(adapter, true);
- /* The evt queues are created in unarmed state; arm them */
- for_all_rx_queues(adapter, rxo, i) {
- be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
+ for_all_rx_queues(adapter, rxo, i)
be_cq_notify(adapter, rxo->cq.id, true, 0);
- }
- be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
- /* Now that interrupts are on we can process async mcc */
+ for_all_tx_queues(adapter, txo, i)
+ be_cq_notify(adapter, txo->cq.id, true, 0);
+
be_async_mcc_enable(adapter);
+ for_all_evt_queues(adapter, eqo, i) {
+ napi_enable(&eqo->napi);
+ be_eq_notify(adapter, eqo->q.id, true, false, 0);
+ }
+
status = be_cmd_link_status_query(adapter, NULL, NULL,
&link_status, 0);
if (!status)
@@ -2540,17 +2488,32 @@ static void be_vf_clear(struct be_adapter *adapter)
static int be_clear(struct be_adapter *adapter)
{
+ int i = 1;
+
+ if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
+ cancel_delayed_work_sync(&adapter->work);
+ adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
+ }
+
if (sriov_enabled(adapter))
be_vf_clear(adapter);
+ for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id[i], 0);
+
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
be_mcc_queues_destroy(adapter);
- be_rx_queues_destroy(adapter);
+ be_rx_cqs_destroy(adapter);
be_tx_queues_destroy(adapter);
+ be_evt_queues_destroy(adapter);
/* tell fw we're done with firing cmds */
be_cmd_fw_clean(adapter);
+
+ be_msix_disable(adapter);
+ kfree(adapter->pmac_id);
return 0;
}
@@ -2569,7 +2532,7 @@ static int be_vf_setup(struct be_adapter *adapter)
{
struct be_vf_cfg *vf_cfg;
u32 cap_flags, en_flags, vf;
- u16 lnk_speed;
+ u16 def_vlan, lnk_speed;
int status;
be_vf_setup_init(adapter);
@@ -2593,6 +2556,12 @@ static int be_vf_setup(struct be_adapter *adapter)
if (status)
goto err;
vf_cfg->tx_rate = lnk_speed * 10;
+
+ status = be_cmd_get_hsw_config(adapter, &def_vlan,
+ vf + 1, vf_cfg->if_handle);
+ if (status)
+ goto err;
+ vf_cfg->def_vid = def_vlan;
}
return 0;
err:
@@ -2609,19 +2578,28 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->eq_next_idx = 0;
}
-static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
+static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
u32 pmac_id;
- int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
- if (status != 0)
- goto do_none;
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK,
- false, adapter->if_handle, pmac_id);
+ int status;
+ bool pmac_id_active;
+
+ status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
+ &pmac_id, mac);
if (status != 0)
goto do_none;
- status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
- &adapter->pmac_id, 0);
+
+ if (pmac_id_active) {
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK,
+ false, adapter->if_handle, pmac_id);
+
+ if (!status)
+ adapter->pmac_id[0] = pmac_id;
+ } else {
+ status = be_cmd_pmac_add(adapter, mac,
+ adapter->if_handle, &adapter->pmac_id[0], 0);
+ }
do_none:
return status;
}
@@ -2631,24 +2609,29 @@ static int be_setup(struct be_adapter *adapter)
struct net_device *netdev = adapter->netdev;
u32 cap_flags, en_flags;
u32 tx_fc, rx_fc;
- int status, i;
+ int status;
u8 mac[ETH_ALEN];
- struct be_tx_obj *txo;
be_setup_init(adapter);
be_cmd_req_native_mode(adapter);
- status = be_tx_queues_create(adapter);
- if (status != 0)
+ be_msix_enable(adapter);
+
+ status = be_evt_queues_create(adapter);
+ if (status)
goto err;
- status = be_rx_queues_create(adapter);
- if (status != 0)
+ status = be_tx_cqs_create(adapter);
+ if (status)
+ goto err;
+
+ status = be_rx_cqs_create(adapter);
+ if (status)
goto err;
status = be_mcc_queues_create(adapter);
- if (status != 0)
+ if (status)
goto err;
memset(mac, 0, ETH_ALEN);
@@ -2670,23 +2653,17 @@ static int be_setup(struct be_adapter *adapter)
}
status = be_cmd_if_create(adapter, cap_flags, en_flags,
netdev->dev_addr, &adapter->if_handle,
- &adapter->pmac_id, 0);
+ &adapter->pmac_id[0], 0);
if (status != 0)
goto err;
- for_all_tx_queues(adapter, txo, i) {
- status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
- if (status)
- goto err;
- }
-
/* The VF's permanent mac queried from card is incorrect.
 * For BEx: Query the mac configured by the PF using if_handle
* For Lancer: Get and use mac_list to obtain mac address.
*/
if (!be_physfn(adapter)) {
if (lancer_chip(adapter))
- status = be_configure_mac_from_list(adapter, mac);
+ status = be_add_mac_from_list(adapter, mac);
else
status = be_cmd_mac_addr_query(adapter, mac,
MAC_ADDRESS_TYPE_NETWORK, false,
@@ -2697,6 +2674,10 @@ static int be_setup(struct be_adapter *adapter)
}
}
+ status = be_tx_qs_create(adapter);
+ if (status)
+ goto err;
+
be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
status = be_vid_config(adapter, false, 0);
@@ -2726,6 +2707,9 @@ static int be_setup(struct be_adapter *adapter)
goto err;
}
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+ adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
+
return 0;
err:
be_clear(adapter);
@@ -2736,12 +2720,13 @@ err:
static void be_netpoll(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_rx_obj *rxo;
+ struct be_eq_obj *eqo;
int i;
- event_handle(adapter, &adapter->tx_eq, false);
- for_all_rx_queues(adapter, rxo, i)
- event_handle(adapter, &rxo->rx_eq, true);
+ for_all_evt_queues(adapter, eqo, i)
+ event_handle(eqo);
+
+ return;
}
#endif
@@ -3102,7 +3087,7 @@ static const struct net_device_ops be_netdev_ops = {
static void be_netdev_init(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_rx_obj *rxo;
+ struct be_eq_obj *eqo;
int i;
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
@@ -3117,20 +3102,18 @@ static void be_netdev_init(struct net_device *netdev)
netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
netdev->flags |= IFF_MULTICAST;
netif_set_gso_max_size(netdev, 65535);
- BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
+ netdev->netdev_ops = &be_netdev_ops;
SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
- for_all_rx_queues(adapter, rxo, i)
- netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
- BE_NAPI_WEIGHT);
-
- netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
- BE_NAPI_WEIGHT);
+ for_all_evt_queues(adapter, eqo, i)
+ netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -3289,8 +3272,6 @@ static void __devexit be_remove(struct pci_dev *pdev)
if (!adapter)
return;
- cancel_delayed_work_sync(&adapter->work);
-
unregister_netdev(adapter->netdev);
be_clear(adapter);
@@ -3301,8 +3282,6 @@ static void __devexit be_remove(struct pci_dev *pdev)
be_sriov_disable(adapter);
- be_msix_disable(adapter);
-
pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -3310,6 +3289,12 @@ static void __devexit be_remove(struct pci_dev *pdev)
free_netdev(adapter->netdev);
}
+bool be_is_wol_supported(struct be_adapter *adapter)
+{
+ return ((adapter->wol_cap & BE_WOL_CAP) &&
+ !be_is_wol_excluded(adapter)) ? true : false;
+}
+
static int be_get_config(struct be_adapter *adapter)
{
int status;
@@ -3320,14 +3305,36 @@ static int be_get_config(struct be_adapter *adapter)
return status;
if (adapter->function_mode & FLEX10_MODE)
- adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
else
adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+ if (be_physfn(adapter))
+ adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
+ else
+ adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
+
+ /* primary mac needs 1 pmac entry */
+ adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
+ sizeof(u32), GFP_KERNEL);
+ if (!adapter->pmac_id)
+ return -ENOMEM;
+
status = be_cmd_get_cntl_attributes(adapter);
if (status)
return status;
+ status = be_cmd_get_acpi_wol_cap(adapter);
+ if (status) {
+ /* in case of a failure to get WOL capabilities,
+ * check the exclusion list to determine WOL capability */
+ if (!be_is_wol_excluded(adapter))
+ adapter->wol_cap |= BE_WOL_CAP;
+ }
+
+ if (be_is_wol_supported(adapter))
+ adapter->wol = true;
+
return 0;
}
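
As a hedged aside on the hunk above: adapter->pmac_id becomes a table sized from the per-function MAC count, with one extra slot reserved for the primary MAC. A minimal, self-contained sketch of that allocation pattern follows; the demo_* names are hypothetical and this is not benet code.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_adapter {
	u32 *pmac_id;		/* one HW handle per programmed MAC */
	u32 max_pmac_cnt;
};

static int demo_alloc_pmac_table(struct demo_adapter *ad, u32 max_cnt)
{
	ad->max_pmac_cnt = max_cnt;
	/* +1: the primary MAC needs its own pmac entry, as above */
	ad->pmac_id = kcalloc(max_cnt + 1, sizeof(u32), GFP_KERNEL);
	return ad->pmac_id ? 0 : -ENOMEM;
}

static void demo_free_pmac_table(struct demo_adapter *ad)
{
	kfree(ad->pmac_id);	/* kfree(NULL) is a no-op */
	ad->pmac_id = NULL;
}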
@@ -3469,6 +3476,7 @@ static void be_worker(struct work_struct *work)
struct be_adapter *adapter =
container_of(work, struct be_adapter, work.work);
struct be_rx_obj *rxo;
+ struct be_eq_obj *eqo;
int i;
if (lancer_chip(adapter))
@@ -3479,15 +3487,7 @@ static void be_worker(struct work_struct *work)
/* when interrupts are not yet enabled, just reap any pending
* mcc completions */
if (!netif_running(adapter->netdev)) {
- int mcc_compl, status = 0;
-
- mcc_compl = be_process_mcc(adapter, &status);
-
- if (mcc_compl) {
- struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
- }
-
+ be_process_mcc(adapter);
goto reschedule;
}
@@ -3500,14 +3500,15 @@ static void be_worker(struct work_struct *work)
}
for_all_rx_queues(adapter, rxo, i) {
- be_rx_eqd_update(adapter, rxo);
-
if (rxo->rx_post_starved) {
rxo->rx_post_starved = false;
be_post_rx_frags(rxo, GFP_KERNEL);
}
}
+ for_all_evt_queues(adapter, eqo, i)
+ be_eqd_update(adapter, eqo);
+
reschedule:
adapter->work_counter++;
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
@@ -3593,6 +3594,12 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status)
goto ctrl_clean;
+ /* The INTR bit may be set in the card when probed by a kdump kernel
+ * after a crash.
+ */
+ if (!lancer_chip(adapter))
+ be_intr_set(adapter, false);
+
status = be_stats_init(adapter);
if (status)
goto ctrl_clean;
@@ -3601,14 +3608,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status)
goto stats_clean;
- /* The INTR bit may be set in the card when probed by a kdump kernel
- * after a crash.
- */
- if (!lancer_chip(adapter))
- be_intr_set(adapter, false);
-
- be_msix_enable(adapter);
-
INIT_DELAYED_WORK(&adapter->work, be_worker);
adapter->rx_fc = adapter->tx_fc = true;
@@ -3621,9 +3620,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status != 0)
goto unsetup;
- dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
+ dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
+ adapter->port_num);
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
unsetup:
@@ -3653,7 +3652,6 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- cancel_delayed_work_sync(&adapter->work);
if (adapter->wol)
be_setup_wol(adapter, true);
@@ -3665,7 +3663,6 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
}
be_clear(adapter);
- be_msix_disable(adapter);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3687,7 +3684,6 @@ static int be_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, 0);
pci_restore_state(pdev);
- be_msix_enable(adapter);
/* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter);
if (status)
@@ -3704,7 +3700,6 @@ static int be_resume(struct pci_dev *pdev)
if (adapter->wol)
be_setup_wol(adapter, false);
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
}
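
The be2net hunks above move the periodic worker so it is scheduled from be_setup() and cancelled on the teardown path rather than from probe/remove and suspend/resume. A minimal sketch of that delayed-work lifecycle, under hypothetical demo_* names (not the driver's code):

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_adapter {
	struct delayed_work work;
};

static void demo_worker(struct work_struct *work)
{
	struct demo_adapter *ad =
		container_of(work, struct demo_adapter, work.work);

	/* ... periodic housekeeping ... */

	/* re-arm for the next tick, one second out */
	schedule_delayed_work(&ad->work, msecs_to_jiffies(1000));
}

static void demo_start(struct demo_adapter *ad)
{
	INIT_DELAYED_WORK(&ad->work, demo_worker);
	schedule_delayed_work(&ad->work, msecs_to_jiffies(1000));
}

static void demo_stop(struct demo_adapter *ad)
{
	/* waits for a running instance and prevents re-arming */
	cancel_delayed_work_sync(&ad->work);
}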
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 60f0e788cc25..a38167810546 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/net/ethoc.c
+ * linux/drivers/net/ethernet/ethoc.c
*
* Copyright (C) 2007-2008 Avionic Design Development GmbH
* Copyright (C) 2008-2009 Avionic Design GmbH
@@ -776,10 +776,16 @@ static int ethoc_set_mac_address(struct net_device *dev, void *addr)
struct ethoc *priv = netdev_priv(dev);
u8 *mac = (u8 *)addr;
+ if (!is_valid_ether_addr(mac))
+ return -EADDRNOTAVAIL;
+
ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
(mac[4] << 8) | (mac[5] << 0));
ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0));
+ memcpy(dev->dev_addr, mac, ETH_ALEN);
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
+
return 0;
}
@@ -909,11 +915,11 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
unsigned int phy;
int num_bd;
int ret = 0;
+ bool random_mac = false;
/* allocate networking device */
netdev = alloc_etherdev(sizeof(struct ethoc));
if (!netdev) {
- dev_err(&pdev->dev, "cannot allocate network device\n");
ret = -ENOMEM;
goto out;
}
@@ -1050,10 +1056,19 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
/* Check the MAC again for validity; if it still isn't valid, choose
* and program a random one. */
- if (!is_valid_ether_addr(netdev->dev_addr))
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
random_ether_addr(netdev->dev_addr);
+ random_mac = true;
+ }
+
+ ret = ethoc_set_mac_address(netdev, netdev->dev_addr);
+ if (ret) {
+ dev_err(&netdev->dev, "failed to set MAC address\n");
+ goto error;
+ }
- ethoc_set_mac_address(netdev, netdev->dev_addr);
+ if (random_mac)
+ netdev->addr_assign_type |= NET_ADDR_RANDOM;
/* register MII bus */
priv->mdio = mdiobus_alloc();
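
The ethoc change above makes the MAC-set path reject invalid addresses, commit the new address to dev->dev_addr, and clear the NET_ADDR_RANDOM marker once a real address is programmed. A hedged sketch of that pattern, with the device-register writes elided and demo_set_mac purely hypothetical:

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static int demo_set_mac(struct net_device *dev, const u8 *mac)
{
	if (!is_valid_ether_addr(mac))
		return -EADDRNOTAVAIL;

	/* ... write mac[0..5] into the device's address registers ... */

	memcpy(dev->dev_addr, mac, ETH_ALEN);
	dev->addr_assign_type &= ~NET_ADDR_RANDOM;	/* no longer random */
	return 0;
}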
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 47f85c337cf7..16b07048274c 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1289,7 +1289,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
if (!is_valid_ether_addr(netdev->dev_addr)) {
- random_ether_addr(netdev->dev_addr);
+ eth_hw_addr_random(netdev);
netdev_info(netdev, "generated random MAC address %pM\n",
netdev->dev_addr);
}
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index bb336a0959c9..829b1092fd78 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1133,7 +1133,7 @@ static int ftmac100_probe(struct platform_device *pdev)
netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
if (!is_valid_ether_addr(netdev->dev_addr)) {
- random_ether_addr(netdev->dev_addr);
+ eth_hw_addr_random(netdev);
netdev_info(netdev, "generated random MAC address %pM\n",
netdev->dev_addr);
}
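
The two Faraday hunks replace open-coded random_ether_addr() calls with eth_hw_addr_random(), which fills dev_addr and marks the address as randomly assigned in one step. A minimal probe-time fallback sketch (demo_pick_mac is hypothetical):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void demo_pick_mac(struct net_device *netdev)
{
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		/* fills dev_addr and sets NET_ADDR_RANDOM in one call */
		eth_hw_addr_random(netdev);
		netdev_info(netdev, "using random MAC %pM\n",
			    netdev->dev_addr);
	}
}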
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index c82d444b582d..1637b9862292 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1070,14 +1070,13 @@ static void allocate_rx_buffers(struct net_device *dev)
while (np->really_rx_count != RX_RING_SIZE) {
struct sk_buff *skb;
- skb = dev_alloc_skb(np->rx_buf_sz);
+ skb = netdev_alloc_skb(dev, np->rx_buf_sz);
if (skb == NULL)
break; /* Better luck next round. */
while (np->lack_rxbuf->skbuff)
np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
- skb->dev = dev; /* Mark as being used by this device. */
np->lack_rxbuf->skbuff = skb;
np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
np->rx_buf_sz, PCI_DMA_FROMDEVICE);
@@ -1265,7 +1264,7 @@ static void init_ring(struct net_device *dev)
/* allocate skb for rx buffers */
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
if (skb == NULL) {
np->lack_rxbuf = &np->rx_ring[i];
@@ -1274,7 +1273,6 @@ static void init_ring(struct net_device *dev)
++np->really_rx_count;
np->rx_ring[i].skbuff = skb;
- skb->dev = dev; /* Mark as being used by this device. */
np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
np->rx_buf_sz, PCI_DMA_FROMDEVICE);
np->rx_ring[i].status = RXOWN;
@@ -1704,7 +1702,7 @@ static int netdev_rx(struct net_device *dev)
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
if (pkt_len < rx_copybreak &&
- (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
pci_dma_sync_single_for_cpu(np->pci_dev,
np->cur_rx->buffer,
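
The fealnx hunks are one instance of a conversion repeated throughout this series: dev_alloc_skb() becomes netdev_alloc_skb(), which associates the buffer with the receiving device, so the manual skb->dev assignment can be dropped. A small sketch of an RX refill helper in that style (demo_rx_refill is hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *demo_rx_refill(struct net_device *dev,
				      unsigned int buf_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, buf_len + NET_IP_ALIGN);

	if (!skb)
		return NULL;		/* better luck next round */

	skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	return skb;
}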
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index e92ef1bd732a..a12b3f5bc025 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -711,7 +711,7 @@ fec_enet_rx(struct net_device *ndev)
* include that when passing upstream as it messes up
* bridging applications.
*/
- skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);
+ skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);
if (unlikely(!skb)) {
printk("%s: Memory squeeze, dropping packet.\n",
@@ -1210,7 +1210,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
bdp = fep->rx_bd_base;
for (i = 0; i < RX_RING_SIZE; i++) {
- skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
+ skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
if (!skb) {
fec_enet_free_buffers(ndev);
return -ENOMEM;
@@ -1739,21 +1739,6 @@ static struct platform_driver fec_driver = {
.remove = __devexit_p(fec_drv_remove),
};
-static int __init
-fec_enet_module_init(void)
-{
- printk(KERN_INFO "FEC Ethernet Driver\n");
-
- return platform_driver_register(&fec_driver);
-}
-
-static void __exit
-fec_enet_cleanup(void)
-{
- platform_driver_unregister(&fec_driver);
-}
-
-module_exit(fec_enet_cleanup);
-module_init(fec_enet_module_init);
+module_platform_driver(fec_driver);
MODULE_LICENSE("GPL");
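
module_platform_driver() above replaces a hand-written module_init()/module_exit() pair that only registered and unregistered the platform driver. Roughly what a driver using the macro looks like; this is a sketch with hypothetical demo_* names, not fec.c:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit demo_probe(struct platform_device *pdev)
{
	return 0;	/* map resources, register netdev, etc. */
}

static int __devexit demo_remove(struct platform_device *pdev)
{
	return 0;	/* undo everything done in probe */
}

static struct platform_driver demo_driver = {
	.driver	= {
		.name	= "demo",
		.owner	= THIS_MODULE,
	},
	.probe	= demo_probe,
	.remove	= __devexit_p(demo_remove),
};

/* expands to module_init/module_exit stubs that call
 * platform_driver_register()/unregister() */
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");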
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 30745b56fe5d..7b34d8c698da 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -160,7 +160,7 @@ static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task
struct sk_buff *skb;
while (!bcom_queue_full(rxtsk)) {
- skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
+ skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
if (!skb)
return -EAGAIN;
@@ -416,7 +416,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
/* skbs are allocated on open, so now we allocate a new one,
* and remove the old (with the packet) */
- skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
+ skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
if (!skb) {
/* Can't get a new one : reuse the same & drop pkt */
dev_notice(&dev->dev, "Low memory - dropped packet.\n");
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.h b/drivers/net/ethernet/freescale/fec_mpc52xx.h
index 41d2dffde55b..10afa54dd062 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.h
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.h
@@ -1,5 +1,5 @@
/*
- * drivers/drivers/net/fec_mpc52xx/fec.h
+ * drivers/net/ethernet/freescale/fec_mpc52xx.h
*
* Driver for the MPC5200 Fast Ethernet Controller
*
diff --git a/drivers/net/ethernet/freescale/fs_enet/fec.h b/drivers/net/ethernet/freescale/fs_enet/fec.h
index e980527e2b99..b9fe5bde432a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fec.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fec.h
@@ -23,6 +23,10 @@
#define FEC_ECNTRL_ETHER_EN 0x00000002
#define FEC_ECNTRL_RESET 0x00000001
+/* RMII mode enabled only when MII_MODE bit is set too. */
+#define FEC_RCNTRL_RMII_MODE (0x00000100 | \
+ FEC_RCNTRL_MII_MODE | FEC_RCNTRL_FCE)
+#define FEC_RCNTRL_FCE 0x00000020
#define FEC_RCNTRL_BC_REJ 0x00000010
#define FEC_RCNTRL_PROM 0x00000008
#define FEC_RCNTRL_MII_MODE 0x00000004
@@ -33,8 +37,6 @@
#define FEC_TCNTRL_HBC 0x00000002
#define FEC_TCNTRL_GTS 0x00000001
-
-
/*
* Delay to wait for FEC reset command to complete (in us)
*/
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 910a8e18a9ae..e4e6cd2c5f82 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -154,7 +154,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
if (pkt_len <= fpi->rx_copybreak) {
/* +2 to make IP header L1 cache aligned */
- skbn = dev_alloc_skb(pkt_len + 2);
+ skbn = netdev_alloc_skb(dev, pkt_len + 2);
if (skbn != NULL) {
skb_reserve(skbn, 2); /* align IP header */
skb_copy_from_linear_data(skb,
@@ -165,7 +165,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
skbn = skbt;
}
} else {
- skbn = dev_alloc_skb(ENET_RX_FRSIZE);
+ skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
if (skbn)
skb_align(skbn, ENET_RX_ALIGN);
@@ -286,7 +286,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
if (pkt_len <= fpi->rx_copybreak) {
/* +2 to make IP header L1 cache aligned */
- skbn = dev_alloc_skb(pkt_len + 2);
+ skbn = netdev_alloc_skb(dev, pkt_len + 2);
if (skbn != NULL) {
skb_reserve(skbn, 2); /* align IP header */
skb_copy_from_linear_data(skb,
@@ -297,7 +297,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
skbn = skbt;
}
} else {
- skbn = dev_alloc_skb(ENET_RX_FRSIZE);
+ skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
if (skbn)
skb_align(skbn, ENET_RX_ALIGN);
@@ -504,7 +504,7 @@ void fs_init_bds(struct net_device *dev)
* Initialize the receive buffer descriptors.
*/
for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
- skb = dev_alloc_skb(ENET_RX_FRSIZE);
+ skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
if (skb == NULL) {
dev_warn(fep->dev,
"Memory squeeze, unable to allocate skb\n");
@@ -592,7 +592,7 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
struct fs_enet_private *fep = netdev_priv(dev);
/* Alloc new skb */
- new_skb = dev_alloc_skb(skb->len + 4);
+ new_skb = netdev_alloc_skb(dev, skb->len + 4);
if (!new_skb) {
if (net_ratelimit()) {
dev_warn(fep->dev,
@@ -790,16 +790,20 @@ static int fs_init_phy(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct phy_device *phydev;
+ phy_interface_t iface;
fep->oldlink = 0;
fep->oldspeed = 0;
fep->oldduplex = -1;
+ iface = fep->fpi->use_rmii ?
+ PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII;
+
phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
- PHY_INTERFACE_MODE_MII);
+ iface);
if (!phydev) {
phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
- PHY_INTERFACE_MODE_MII);
+ iface);
}
if (!phydev) {
dev_err(&dev->dev, "Could not attach to PHY\n");
@@ -1007,6 +1011,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev)
struct fs_platform_info *fpi;
const u32 *data;
const u8 *mac_addr;
+ const char *phy_connection_type;
int privsize, len, ret = -ENODEV;
match = of_match_device(fs_enet_match, &ofdev->dev);
@@ -1035,6 +1040,13 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev)
NULL)))
goto out_free_fpi;
+ if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
+ phy_connection_type = of_get_property(ofdev->dev.of_node,
+ "phy-connection-type", NULL);
+ if (phy_connection_type && !strcmp("rmii", phy_connection_type))
+ fpi->use_rmii = 1;
+ }
+
privsize = sizeof(*fep) +
sizeof(struct sk_buff **) *
(fpi->rx_ring + fpi->tx_ring);
@@ -1150,6 +1162,10 @@ static struct of_device_id fs_enet_match[] = {
.compatible = "fsl,mpc5121-fec",
.data = (void *)&fs_fec_ops,
},
+ {
+ .compatible = "fsl,mpc5125-fec",
+ .data = (void *)&fs_fec_ops,
+ },
#else
{
.compatible = "fsl,pq1-fec-enet",
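
The fs_enet hunks read the standard "phy-connection-type" device-tree property for the new "fsl,mpc5125-fec" compatible and switch to RMII when it says so. A hedged sketch of that lookup, covering only the MII/RMII cases handled above (demo_get_phy_iface is hypothetical):

#include <linux/of.h>
#include <linux/phy.h>
#include <linux/string.h>

static phy_interface_t demo_get_phy_iface(struct device_node *np)
{
	const char *pct = of_get_property(np, "phy-connection-type", NULL);

	if (pct && !strcmp(pct, "rmii"))
		return PHY_INTERFACE_MODE_RMII;

	return PHY_INTERFACE_MODE_MII;	/* default, as before the patch */
}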
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index b9fbc83d64a7..9ae6cdbcac2e 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -322,10 +322,11 @@ static void restart(struct net_device *dev)
FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
#else
/*
- * Only set MII mode - do not touch maximum frame length
+ * Only set MII/RMII mode - do not touch maximum frame length
* configured before.
*/
- FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);
+ FS(fecp, r_cntrl, fpi->use_rmii ?
+ FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE);
#endif
/*
* adjust to duplex mode
@@ -381,7 +382,9 @@ static void stop(struct net_device *dev)
/* shut down FEC1? that's where the mii bus is */
if (fpi->has_phy) {
- FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
+ FS(fecp, r_cntrl, fpi->use_rmii ?
+ FEC_RCNTRL_RMII_MODE :
+ FEC_RCNTRL_MII_MODE); /* MII/RMII enable */
FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
FW(fecp, ievent, FEC_ENET_MII);
FW(fecp, mii_speed, feci->mii_speed);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 39d160d353a4..d9428f0e738a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1,5 +1,5 @@
/*
- * drivers/net/gianfar.c
+ * drivers/net/ethernet/freescale/gianfar.c
*
* Gianfar Ethernet Driver
* This driver is designed for the non-CPM ethernet controllers
@@ -104,10 +104,7 @@
#include "fsl_pq_mdio.h"
#define TX_TIMEOUT (1*HZ)
-#undef BRIEF_GFAR_ERRORS
-#undef VERBOSE_GFAR_ERRORS
-const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";
static int gfar_enet_open(struct net_device *dev);
@@ -1755,9 +1752,12 @@ static void free_skb_resources(struct gfar_private *priv)
/* Go through all the buffer descriptors and free their data buffers */
for (i = 0; i < priv->num_tx_queues; i++) {
+ struct netdev_queue *txq;
tx_queue = priv->tx_queue[i];
+ txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
if(tx_queue->tx_skbuff)
free_skb_tx_queue(tx_queue);
+ netdev_tx_reset_queue(txq);
}
for (i = 0; i < priv->num_rx_queues; i++) {
@@ -2217,6 +2217,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
}
+ netdev_tx_sent_queue(txq, skb->len);
+
/*
* We can work in parallel with gfar_clean_tx_ring(), except
* when modifying num_txbdfree. Note that we didn't grab the lock
@@ -2460,6 +2462,7 @@ static void gfar_align_skb(struct sk_buff *skb)
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
struct net_device *dev = tx_queue->dev;
+ struct netdev_queue *txq;
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_rx_q *rx_queue = NULL;
struct txbd8 *bdp, *next = NULL;
@@ -2471,10 +2474,13 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
int frags = 0, nr_txbds = 0;
int i;
int howmany = 0;
+ int tqi = tx_queue->qindex;
+ unsigned int bytes_sent = 0;
u32 lstatus;
size_t buflen;
- rx_queue = priv->rx_queue[tx_queue->qindex];
+ rx_queue = priv->rx_queue[tqi];
+ txq = netdev_get_tx_queue(dev, tqi);
bdp = tx_queue->dirty_tx;
skb_dirtytx = tx_queue->skb_dirtytx;
@@ -2533,6 +2539,8 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
bdp = next_txbd(bdp, base, tx_ring_size);
}
+ bytes_sent += skb->len;
+
/*
* If there's room in the queue (limit it to rx_buffer_size)
* we add this skb back into the pool, if it's the right size
@@ -2557,13 +2565,15 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
}
/* If we freed a buffer, we can restart transmission, if necessary */
- if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
- netif_wake_subqueue(dev, tx_queue->qindex);
+ if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
+ netif_wake_subqueue(dev, tqi);
/* Update dirty indicators */
tx_queue->skb_dirtytx = skb_dirtytx;
tx_queue->dirty_tx = bdp;
+ netdev_tx_completed_queue(txq, howmany, bytes_sent);
+
return howmany;
}
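
The gianfar changes wire up byte queue limits at three points: bytes are reported to the stack when a frame is queued for transmit, completions are reported from the TX clean routine, and the counters are reset when the rings are torn down. A minimal sketch of those three hooks (the demo_* wrappers are hypothetical):

#include <linux/netdevice.h>

static void demo_tx_account(struct net_device *dev, int qindex,
			    unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qindex);

	netdev_tx_sent_queue(txq, bytes);	/* on the xmit path */
}

static void demo_tx_clean(struct net_device *dev, int qindex,
			  unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qindex);

	netdev_tx_completed_queue(txq, pkts, bytes);	/* after reclaim */
}

static void demo_tx_reset(struct net_device *dev, int qindex)
{
	/* keep BQL state consistent when the ring is freed */
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, qindex));
}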
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 40c33a7554c0..fc2488adca36 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1,5 +1,5 @@
/*
- * drivers/net/gianfar.h
+ * drivers/net/ethernet/freescale/gianfar.h
*
* Gianfar Ethernet Driver
* Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
@@ -78,11 +78,8 @@ struct ethtool_rx_list {
#define INCREMENTAL_BUFFER_SIZE 512
#define PHY_INIT_TIMEOUT 100000
-#define GFAR_PHY_CHANGE_TIME 2
-#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.2, "
#define DRV_NAME "gfar-enet"
-extern const char gfar_driver_name[];
extern const char gfar_driver_version[];
/* MAXIMUM NUMBER OF QUEUES SUPPORTED */
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 5a3b2e5b2880..8d74efd04bb9 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1,5 +1,5 @@
/*
- * drivers/net/gianfar_ethtool.c
+ * drivers/net/ethernet/freescale/gianfar_ethtool.c
*
* Gianfar Ethernet Driver
* Ethtool support for Gianfar Enet
@@ -58,7 +58,7 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo);
-static char stat_gstrings[][ETH_GSTRING_LEN] = {
+static const char stat_gstrings[][ETH_GSTRING_LEN] = {
"rx-dropped-by-kernel",
"rx-large-frame-errors",
"rx-short-frame-errors",
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 83e0ed757e33..5fd620bec15c 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -564,6 +564,6 @@ static struct platform_driver gianfar_ptp_driver = {
module_platform_driver(gianfar_ptp_driver);
-MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_DESCRIPTION("PTP clock using the eTSEC");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
index 64f4094ac7f1..cd14a4d449c2 100644
--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c
+++ b/drivers/net/ethernet/freescale/gianfar_sysfs.c
@@ -1,5 +1,5 @@
/*
- * drivers/net/gianfar_sysfs.c
+ * drivers/net/ethernet/freescale/gianfar_sysfs.c
*
* Gianfar Ethernet Driver
* This driver is designed for the non-CPM ethernet controllers
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index ba2dc083bfc0..4e3cd2f8debb 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -214,8 +214,9 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
skb = __skb_dequeue(&ugeth->rx_recycle);
if (!skb)
- skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
- UCC_GETH_RX_DATA_BUF_ALIGNMENT);
+ skb = netdev_alloc_skb(ugeth->ndev,
+ ugeth->ug_info->uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT);
if (skb == NULL)
return NULL;
@@ -227,8 +228,6 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
(((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
1)));
- skb->dev = ugeth->ndev;
-
out_be32(&((struct qe_bd __iomem *)bd)->buf,
dma_map_single(ugeth->dev,
skb->data,
@@ -1857,11 +1856,93 @@ static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *uge
return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
}
-static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
+static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ u16 i, j;
+ u8 __iomem *bd;
+
+
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ if (ugeth->p_rx_bd_ring[i]) {
+ /* Return existing data buffers in ring */
+ bd = ugeth->p_rx_bd_ring[i];
+ for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
+ if (ugeth->rx_skbuff[i][j]) {
+ dma_unmap_single(ugeth->dev,
+ in_be32(&((struct qe_bd __iomem *)bd)->buf),
+ ugeth->ug_info->
+ uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(
+ ugeth->rx_skbuff[i][j]);
+ ugeth->rx_skbuff[i][j] = NULL;
+ }
+ bd += sizeof(struct qe_bd);
+ }
+
+ kfree(ugeth->rx_skbuff[i]);
+
+ if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_SYSTEM)
+ kfree((void *)ugeth->rx_bd_ring_offset[i]);
+ else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM)
+ qe_muram_free(ugeth->rx_bd_ring_offset[i]);
+ ugeth->p_rx_bd_ring[i] = NULL;
+ }
+ }
+
+}
+
+static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
{
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
u16 i, j;
u8 __iomem *bd;
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ bd = ugeth->p_tx_bd_ring[i];
+ if (!bd)
+ continue;
+ for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
+ if (ugeth->tx_skbuff[i][j]) {
+ dma_unmap_single(ugeth->dev,
+ in_be32(&((struct qe_bd __iomem *)bd)->buf),
+ (in_be32((u32 __iomem *)bd) &
+ BD_LENGTH_MASK),
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
+ ugeth->tx_skbuff[i][j] = NULL;
+ }
+ }
+
+ kfree(ugeth->tx_skbuff[i]);
+
+ if (ugeth->p_tx_bd_ring[i]) {
+ if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_SYSTEM)
+ kfree((void *)ugeth->tx_bd_ring_offset[i]);
+ else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM)
+ qe_muram_free(ugeth->tx_bd_ring_offset[i]);
+ ugeth->p_tx_bd_ring[i] = NULL;
+ }
+ }
+
+}
+
+static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
+{
if (!ugeth)
return;
@@ -1928,64 +2009,8 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
kfree(ugeth->p_init_enet_param_shadow);
ugeth->p_init_enet_param_shadow = NULL;
}
- for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
- bd = ugeth->p_tx_bd_ring[i];
- if (!bd)
- continue;
- for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
- if (ugeth->tx_skbuff[i][j]) {
- dma_unmap_single(ugeth->dev,
- in_be32(&((struct qe_bd __iomem *)bd)->buf),
- (in_be32((u32 __iomem *)bd) &
- BD_LENGTH_MASK),
- DMA_TO_DEVICE);
- dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
- ugeth->tx_skbuff[i][j] = NULL;
- }
- }
-
- kfree(ugeth->tx_skbuff[i]);
-
- if (ugeth->p_tx_bd_ring[i]) {
- if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_SYSTEM)
- kfree((void *)ugeth->tx_bd_ring_offset[i]);
- else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM)
- qe_muram_free(ugeth->tx_bd_ring_offset[i]);
- ugeth->p_tx_bd_ring[i] = NULL;
- }
- }
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
- if (ugeth->p_rx_bd_ring[i]) {
- /* Return existing data buffers in ring */
- bd = ugeth->p_rx_bd_ring[i];
- for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
- if (ugeth->rx_skbuff[i][j]) {
- dma_unmap_single(ugeth->dev,
- in_be32(&((struct qe_bd __iomem *)bd)->buf),
- ugeth->ug_info->
- uf_info.max_rx_buf_length +
- UCC_GETH_RX_DATA_BUF_ALIGNMENT,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(
- ugeth->rx_skbuff[i][j]);
- ugeth->rx_skbuff[i][j] = NULL;
- }
- bd += sizeof(struct qe_bd);
- }
-
- kfree(ugeth->rx_skbuff[i]);
-
- if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_SYSTEM)
- kfree((void *)ugeth->rx_bd_ring_offset[i]);
- else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM)
- qe_muram_free(ugeth->rx_bd_ring_offset[i]);
- ugeth->p_rx_bd_ring[i] = NULL;
- }
- }
+ ucc_geth_free_tx(ugeth);
+ ucc_geth_free_rx(ugeth);
while (!list_empty(&ugeth->group_hash_q))
put_enet_addr_container(ENET_ADDR_CONT_ENTRY
(dequeue(&ugeth->group_hash_q)));
@@ -2211,6 +2236,171 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
return 0;
}
+static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ int length;
+ u16 i, j;
+ u8 __iomem *bd;
+
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ /* Allocate Tx bds */
+ for (j = 0; j < ug_info->numQueuesTx; j++) {
+ /* Allocate in multiple of
+ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
+ according to spec */
+ length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
+ / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
+ * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
+ if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
+ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
+ length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
+ if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
+ u32 align = 4;
+ if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
+ align = UCC_GETH_TX_BD_RING_ALIGNMENT;
+ ugeth->tx_bd_ring_offset[j] =
+ (u32) kmalloc((u32) (length + align), GFP_KERNEL);
+
+ if (ugeth->tx_bd_ring_offset[j] != 0)
+ ugeth->p_tx_bd_ring[j] =
+ (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
+ align) & ~(align - 1));
+ } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
+ ugeth->tx_bd_ring_offset[j] =
+ qe_muram_alloc(length,
+ UCC_GETH_TX_BD_RING_ALIGNMENT);
+ if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
+ ugeth->p_tx_bd_ring[j] =
+ (u8 __iomem *) qe_muram_addr(ugeth->
+ tx_bd_ring_offset[j]);
+ }
+ if (!ugeth->p_tx_bd_ring[j]) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate memory for Tx bd rings.",
+ __func__);
+ return -ENOMEM;
+ }
+ /* Zero unused end of bd ring, according to spec */
+ memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
+ ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
+ length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
+ }
+
+ /* Init Tx bds */
+ for (j = 0; j < ug_info->numQueuesTx; j++) {
+ /* Setup the skbuff rings */
+ ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
+ ugeth->ug_info->bdRingLenTx[j],
+ GFP_KERNEL);
+
+ if (ugeth->tx_skbuff[j] == NULL) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Could not allocate tx_skbuff",
+ __func__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
+ ugeth->tx_skbuff[j][i] = NULL;
+
+ ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
+ bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
+ for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
+ /* clear bd buffer */
+ out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, 0);
+ bd += sizeof(struct qe_bd);
+ }
+ bd -= sizeof(struct qe_bd);
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
+ }
+
+ return 0;
+}
+
+static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ int length;
+ u16 i, j;
+ u8 __iomem *bd;
+
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ /* Allocate Rx bds */
+ for (j = 0; j < ug_info->numQueuesRx; j++) {
+ length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
+ if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
+ u32 align = 4;
+ if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
+ align = UCC_GETH_RX_BD_RING_ALIGNMENT;
+ ugeth->rx_bd_ring_offset[j] =
+ (u32) kmalloc((u32) (length + align), GFP_KERNEL);
+ if (ugeth->rx_bd_ring_offset[j] != 0)
+ ugeth->p_rx_bd_ring[j] =
+ (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
+ align) & ~(align - 1));
+ } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
+ ugeth->rx_bd_ring_offset[j] =
+ qe_muram_alloc(length,
+ UCC_GETH_RX_BD_RING_ALIGNMENT);
+ if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
+ ugeth->p_rx_bd_ring[j] =
+ (u8 __iomem *) qe_muram_addr(ugeth->
+ rx_bd_ring_offset[j]);
+ }
+ if (!ugeth->p_rx_bd_ring[j]) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate memory for Rx bd rings.",
+ __func__);
+ return -ENOMEM;
+ }
+ }
+
+ /* Init Rx bds */
+ for (j = 0; j < ug_info->numQueuesRx; j++) {
+ /* Setup the skbuff rings */
+ ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
+ ugeth->ug_info->bdRingLenRx[j],
+ GFP_KERNEL);
+
+ if (ugeth->rx_skbuff[j] == NULL) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Could not allocate rx_skbuff",
+ __func__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
+ ugeth->rx_skbuff[j][i] = NULL;
+
+ ugeth->skb_currx[j] = 0;
+ bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
+ for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, R_I);
+ /* clear bd buffer */
+ out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
+ bd += sizeof(struct qe_bd);
+ }
+ bd -= sizeof(struct qe_bd);
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
+ }
+
+ return 0;
+}
+
static int ucc_geth_startup(struct ucc_geth_private *ugeth)
{
struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
@@ -2223,11 +2413,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
int ret_val = -EINVAL;
u32 remoder = UCC_GETH_REMODER_INIT;
u32 init_enet_pram_offset, cecr_subblock, command;
- u32 ifstat, i, j, size, l2qt, l3qt, length;
+ u32 ifstat, i, j, size, l2qt, l3qt;
u16 temoder = UCC_GETH_TEMODER_INIT;
u16 test;
u8 function_code = 0;
- u8 __iomem *bd;
u8 __iomem *endOfRing;
u8 numThreadsRxNumerical, numThreadsTxNumerical;
@@ -2367,142 +2556,13 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
0, &uf_regs->upsmr, &ug_regs->uescr);
- /* Allocate Tx bds */
- for (j = 0; j < ug_info->numQueuesTx; j++) {
- /* Allocate in multiple of
- UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
- according to spec */
- length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
- / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
- * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
- if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
- UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
- length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
- if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
- u32 align = 4;
- if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
- align = UCC_GETH_TX_BD_RING_ALIGNMENT;
- ugeth->tx_bd_ring_offset[j] =
- (u32) kmalloc((u32) (length + align), GFP_KERNEL);
-
- if (ugeth->tx_bd_ring_offset[j] != 0)
- ugeth->p_tx_bd_ring[j] =
- (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
- align) & ~(align - 1));
- } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
- ugeth->tx_bd_ring_offset[j] =
- qe_muram_alloc(length,
- UCC_GETH_TX_BD_RING_ALIGNMENT);
- if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
- ugeth->p_tx_bd_ring[j] =
- (u8 __iomem *) qe_muram_addr(ugeth->
- tx_bd_ring_offset[j]);
- }
- if (!ugeth->p_tx_bd_ring[j]) {
- if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate memory for Tx bd rings.",
- __func__);
- return -ENOMEM;
- }
- /* Zero unused end of bd ring, according to spec */
- memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
- ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
- length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
- }
-
- /* Allocate Rx bds */
- for (j = 0; j < ug_info->numQueuesRx; j++) {
- length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
- if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
- u32 align = 4;
- if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
- align = UCC_GETH_RX_BD_RING_ALIGNMENT;
- ugeth->rx_bd_ring_offset[j] =
- (u32) kmalloc((u32) (length + align), GFP_KERNEL);
- if (ugeth->rx_bd_ring_offset[j] != 0)
- ugeth->p_rx_bd_ring[j] =
- (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
- align) & ~(align - 1));
- } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
- ugeth->rx_bd_ring_offset[j] =
- qe_muram_alloc(length,
- UCC_GETH_RX_BD_RING_ALIGNMENT);
- if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
- ugeth->p_rx_bd_ring[j] =
- (u8 __iomem *) qe_muram_addr(ugeth->
- rx_bd_ring_offset[j]);
- }
- if (!ugeth->p_rx_bd_ring[j]) {
- if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate memory for Rx bd rings.",
- __func__);
- return -ENOMEM;
- }
- }
-
- /* Init Tx bds */
- for (j = 0; j < ug_info->numQueuesTx; j++) {
- /* Setup the skbuff rings */
- ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
- ugeth->ug_info->bdRingLenTx[j],
- GFP_KERNEL);
-
- if (ugeth->tx_skbuff[j] == NULL) {
- if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Could not allocate tx_skbuff",
- __func__);
- return -ENOMEM;
- }
-
- for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
- ugeth->tx_skbuff[j][i] = NULL;
-
- ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
- bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
- for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
- /* clear bd buffer */
- out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
- /* set bd status and length */
- out_be32((u32 __iomem *)bd, 0);
- bd += sizeof(struct qe_bd);
- }
- bd -= sizeof(struct qe_bd);
- /* set bd status and length */
- out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
- }
-
- /* Init Rx bds */
- for (j = 0; j < ug_info->numQueuesRx; j++) {
- /* Setup the skbuff rings */
- ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
- ugeth->ug_info->bdRingLenRx[j],
- GFP_KERNEL);
-
- if (ugeth->rx_skbuff[j] == NULL) {
- if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Could not allocate rx_skbuff",
- __func__);
- return -ENOMEM;
- }
-
- for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
- ugeth->rx_skbuff[j][i] = NULL;
+ ret_val = ucc_geth_alloc_tx(ugeth);
+ if (ret_val != 0)
+ return ret_val;
- ugeth->skb_currx[j] = 0;
- bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
- for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
- /* set bd status and length */
- out_be32((u32 __iomem *)bd, R_I);
- /* clear bd buffer */
- out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
- bd += sizeof(struct qe_bd);
- }
- bd -= sizeof(struct qe_bd);
- /* set bd status and length */
- out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
- }
+ ret_val = ucc_geth_alloc_rx(ugeth);
+ if (ret_val != 0)
+ return ret_val;
/*
* Global PRAM
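
The ucc_geth rework above only moves the BD-ring setup and teardown into ucc_geth_alloc_tx/rx() and ucc_geth_free_tx/rx(); the allocation trick itself is unchanged: over-allocate by the alignment, round the pointer up, and keep the raw address for kfree(). A self-contained sketch of that trick (demo_alloc_aligned is hypothetical, assuming a power-of-two align):

#include <linux/slab.h>

static void *demo_alloc_aligned(unsigned int length, unsigned int align,
				unsigned long *raw)
{
	unsigned long addr = (unsigned long)kmalloc(length + align,
						    GFP_KERNEL);

	if (!addr)
		return NULL;

	*raw = addr;	/* pass this, not the aligned pointer, to kfree() */
	/* round up to the next "align" boundary, as the BD-ring code does */
	return (void *)((addr + align) & ~((unsigned long)align - 1));
}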
diff --git a/drivers/net/ethernet/fujitsu/at1700.c b/drivers/net/ethernet/fujitsu/at1700.c
index 7c6c908bdf02..586b46fd4eed 100644
--- a/drivers/net/ethernet/fujitsu/at1700.c
+++ b/drivers/net/ethernet/fujitsu/at1700.c
@@ -757,7 +757,7 @@ net_rx(struct net_device *dev)
dev->stats.rx_errors++;
break;
}
- skb = dev_alloc_skb(pkt_len+3);
+ skb = netdev_alloc_skb(dev, pkt_len + 3);
if (skb == NULL) {
printk("%s: Memory squeeze, dropping packet (len %d).\n",
dev->name, pkt_len);
diff --git a/drivers/net/ethernet/fujitsu/eth16i.c b/drivers/net/ethernet/fujitsu/eth16i.c
index b0e2313af3d1..c3f0178fb5cb 100644
--- a/drivers/net/ethernet/fujitsu/eth16i.c
+++ b/drivers/net/ethernet/fujitsu/eth16i.c
@@ -1164,7 +1164,7 @@ static void eth16i_rx(struct net_device *dev)
else { /* Ok so now we should have a good packet */
struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_len + 3);
+ skb = netdev_alloc_skb(dev, pkt_len + 3);
if( skb == NULL ) {
printk(KERN_WARNING "%s: Could'n allocate memory for packet (len %d)\n",
dev->name, pkt_len);
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index ee84b472cee6..0230319ddb59 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -1002,7 +1002,7 @@ static void fjn_rx(struct net_device *dev)
dev->stats.rx_errors++;
break;
}
- skb = dev_alloc_skb(pkt_len+2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n",
pkt_len);
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 6a5ee0776b28..d496673f0908 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1274,7 +1274,7 @@ static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
/* Note: This depends on the alloc_skb functions allocating more
* space than requested, i.e. aligning to 16bytes */
- ringptr->skb = dev_alloc_skb(roundup(MAX_ETHER_SIZE + 2, 4));
+ ringptr->skb = netdev_alloc_skb(dev, roundup(MAX_ETHER_SIZE + 2, 4));
if (NULL != ringptr->skb) {
/*
@@ -1284,7 +1284,6 @@ static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
*/
skb_reserve(ringptr->skb, 2);
- ringptr->skb->dev = dev;
ringptr->skb->data = (u_char *) skb_put(ringptr->skb, MAX_ETHER_SIZE);
/* ringptr->pdl points to the beginning of the PDL, i.e. the PDH */
@@ -1817,7 +1816,7 @@ static void hp100_rx(struct net_device *dev)
#endif
/* Now we allocate the skb and transfer the data into it. */
- skb = dev_alloc_skb(pkt_len+2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) { /* Not enough memory->drop packet */
#ifdef HP100_DEBUG
printk("hp100: %s: rx: couldn't allocate a sk_buff of size %d\n",
@@ -2992,7 +2991,6 @@ static int __init hp100_isa_init(void)
for (i = 0; i < HP100_DEVICES && hp100_port[i] != -1; ++i) {
dev = alloc_etherdev(sizeof(struct hp100_private));
if (!dev) {
- printk(KERN_WARNING "hp100: no memory for network device\n");
while (cards > 0)
cleanup_dev(hp100_devlist[--cards]);
diff --git a/drivers/net/ethernet/i825xx/3c505.c b/drivers/net/ethernet/i825xx/3c505.c
index ba82a266051d..6a5c21b82c51 100644
--- a/drivers/net/ethernet/i825xx/3c505.c
+++ b/drivers/net/ethernet/i825xx/3c505.c
@@ -583,7 +583,7 @@ static void receive_packet(struct net_device *dev, int len)
unsigned long flags;
rlen = (len + 1) & ~1;
- skb = dev_alloc_skb(rlen + 2);
+ skb = netdev_alloc_skb(dev, rlen + 2);
if (!skb) {
pr_warning("%s: memory squeeze, dropping packet\n", dev->name);
diff --git a/drivers/net/ethernet/i825xx/3c507.c b/drivers/net/ethernet/i825xx/3c507.c
index 1e945551c144..ed6925f11479 100644
--- a/drivers/net/ethernet/i825xx/3c507.c
+++ b/drivers/net/ethernet/i825xx/3c507.c
@@ -851,7 +851,7 @@ static void el16_rx(struct net_device *dev)
struct sk_buff *skb;
pkt_len &= 0x3fff;
- skb = dev_alloc_skb(pkt_len+2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
pr_err("%s: Memory squeeze, dropping packet.\n",
dev->name);
diff --git a/drivers/net/ethernet/i825xx/3c523.c b/drivers/net/ethernet/i825xx/3c523.c
index d70d3df4c985..8451ecd4c1ec 100644
--- a/drivers/net/ethernet/i825xx/3c523.c
+++ b/drivers/net/ethernet/i825xx/3c523.c
@@ -983,7 +983,7 @@ static void elmc_rcv_int(struct net_device *dev)
if ((totlen = rbd->status) & RBD_LAST) { /* the first and the last buffer? */
totlen &= RBD_MASK; /* length of this frame */
rbd->status = 0;
- skb = (struct sk_buff *) dev_alloc_skb(totlen + 2);
+ skb = netdev_alloc_skb(dev, totlen + 2);
if (skb != NULL) {
skb_reserve(skb, 2); /* 16 byte alignment */
skb_put(skb,totlen);
diff --git a/drivers/net/ethernet/i825xx/3c527.c b/drivers/net/ethernet/i825xx/3c527.c
index 474b5e71a53a..ef43f3e951c5 100644
--- a/drivers/net/ethernet/i825xx/3c527.c
+++ b/drivers/net/ethernet/i825xx/3c527.c
@@ -1169,7 +1169,7 @@ static void mc32_rx_ring(struct net_device *dev)
/* Try to save time by avoiding a copy on big frames */
if ((length > RX_COPYBREAK) &&
- ((newskb=dev_alloc_skb(1532)) != NULL))
+ ((newskb = netdev_alloc_skb(dev, 1532)) != NULL))
{
skb=lp->rx_ring[rx_ring_tail].skb;
skb_put(skb, length);
@@ -1180,7 +1180,7 @@ static void mc32_rx_ring(struct net_device *dev)
}
else
{
- skb=dev_alloc_skb(length+2);
+ skb = netdev_alloc_skb(dev, length + 2);
if(skb==NULL) {
dev->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index f2408a4d5d9c..6aa927af382c 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -549,14 +549,13 @@ static inline int init_rx_bufs(struct net_device *dev)
/* First build the Receive Buffer Descriptor List */
for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
- struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
+ struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
if (skb == NULL) {
remove_rx_bufs(dev);
return -ENOMEM;
}
- skb->dev = dev;
rbd->v_next = rbd+1;
rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
@@ -810,7 +809,7 @@ static inline int i596_rx(struct net_device *dev)
struct sk_buff *newskb;
/* Get fresh skbuff to replace filled one. */
- newskb = dev_alloc_skb(PKT_BUF_SZ);
+ newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
if (newskb == NULL) {
skb = NULL; /* drop pkt */
goto memory_squeeze;
@@ -819,7 +818,6 @@ static inline int i596_rx(struct net_device *dev)
skb_put(skb, pkt_len);
rx_in_place = 1;
rbd->skb = newskb;
- newskb->dev = dev;
rbd->v_data = newskb->data;
rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
#ifdef __mc68000__
@@ -827,7 +825,7 @@ static inline int i596_rx(struct net_device *dev)
#endif
}
else
- skb = dev_alloc_skb(pkt_len + 2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
memory_squeeze:
if (skb == NULL) {
/* XXX tulip.c can defer packets here!! */
diff --git a/drivers/net/ethernet/i825xx/eepro.c b/drivers/net/ethernet/i825xx/eepro.c
index 114cda7721fe..7a4ad4a07917 100644
--- a/drivers/net/ethernet/i825xx/eepro.c
+++ b/drivers/net/ethernet/i825xx/eepro.c
@@ -1563,7 +1563,7 @@ eepro_rx(struct net_device *dev)
dev->stats.rx_bytes+=rcv_size;
rcv_size &= 0x3fff;
- skb = dev_alloc_skb(rcv_size+5);
+ skb = netdev_alloc_skb(dev, rcv_size + 5);
if (skb == NULL) {
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/i825xx/eexpress.c b/drivers/net/ethernet/i825xx/eexpress.c
index 3a9580f3d4dd..3fc649e54a32 100644
--- a/drivers/net/ethernet/i825xx/eexpress.c
+++ b/drivers/net/ethernet/i825xx/eexpress.c
@@ -955,7 +955,7 @@ static void eexp_hw_rx_pio(struct net_device *dev)
{
struct sk_buff *skb;
pkt_len &= 0x3fff;
- skb = dev_alloc_skb(pkt_len+16);
+ skb = netdev_alloc_skb(dev, pkt_len + 16);
if (skb == NULL)
{
printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name);
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index 42e90a97c7a5..406a12b46404 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -867,7 +867,7 @@ ether1_recv_done (struct net_device *dev)
struct sk_buff *skb;
length = (length + 1) & ~1;
- skb = dev_alloc_skb (length + 2);
+ skb = netdev_alloc_skb(dev, length + 2);
if (skb) {
skb_reserve (skb, 2);
diff --git a/drivers/net/ethernet/i825xx/lp486e.c b/drivers/net/ethernet/i825xx/lp486e.c
index 414044b3cb11..6c2952c8ea15 100644
--- a/drivers/net/ethernet/i825xx/lp486e.c
+++ b/drivers/net/ethernet/i825xx/lp486e.c
@@ -454,8 +454,6 @@ init_rx_bufs(struct net_device *dev, int num) {
}
rfd->rbd = rbd;
- } else {
- printk("Could not kmalloc rbd\n");
}
}
lp->rbd_tail->next = rfd->rbd;
@@ -658,7 +656,7 @@ i596_rx_one(struct net_device *dev, struct i596_private *lp,
if (rfd->stat & RFD_STAT_OK) {
/* a good frame */
int pkt_len = (rfd->count & 0x3fff);
- struct sk_buff *skb = dev_alloc_skb(pkt_len);
+ struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len);
(*frames)++;
diff --git a/drivers/net/ethernet/i825xx/ni52.c b/drivers/net/ethernet/i825xx/ni52.c
index c0893715ef47..272976e1bb0f 100644
--- a/drivers/net/ethernet/i825xx/ni52.c
+++ b/drivers/net/ethernet/i825xx/ni52.c
@@ -964,7 +964,7 @@ static void ni52_rcv_int(struct net_device *dev)
/* the first and the last buffer? */
totlen &= RBD_MASK; /* length of this frame */
writew(0x00, &rbd->status);
- skb = (struct sk_buff *)dev_alloc_skb(totlen+2);
+ skb = netdev_alloc_skb(dev, totlen + 2);
if (skb != NULL) {
skb_reserve(skb, 2);
skb_put(skb, totlen);
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 6ef5e11d1c84..cae17f4bc93e 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -28,7 +28,6 @@ static int automatic_resume = 0; /* experimental .. better should be zero */
static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */
static int fifo=0x8; /* don't change */
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -779,7 +778,7 @@ static void sun3_82586_rcv_int(struct net_device *dev)
{
totlen &= RBD_MASK; /* length of this frame */
rbd->status = 0;
- skb = (struct sk_buff *) dev_alloc_skb(totlen+2);
+ skb = netdev_alloc_skb(dev, totlen + 2);
if(skb != NULL)
{
skb_reserve(skb,2);
@@ -1151,28 +1150,6 @@ static void set_multicast_list(struct net_device *dev)
netif_wake_queue(dev);
}
-#ifdef MODULE
-#error This code is not currently supported as a module
-static struct net_device *dev_sun3_82586;
-
-int init_module(void)
-{
- dev_sun3_82586 = sun3_82586_probe(-1);
- if (IS_ERR(dev_sun3_82586))
- return PTR_ERR(dev_sun3_82586);
- return 0;
-}
-
-void cleanup_module(void)
-{
- unsigned long ioaddr = dev_sun3_82586->base_addr;
- unregister_netdev(dev_sun3_82586);
- release_region(ioaddr, SUN3_82586_TOTAL_SIZE);
- iounmap((void *)ioaddr);
- free_netdev(dev_sun3_82586);
-}
-#endif /* MODULE */
-
#if 0
/*
* DUMP .. we expect a not running CMD unit and enough space
@@ -1209,5 +1186,3 @@ void sun3_82586_dump(struct net_device *dev,void *ptr)
printk("\n");
}
#endif
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/i825xx/znet.c b/drivers/net/ethernet/i825xx/znet.c
index 962b4c421f3f..a43649735a04 100644
--- a/drivers/net/ethernet/i825xx/znet.c
+++ b/drivers/net/ethernet/i825xx/znet.c
@@ -762,7 +762,7 @@ static void znet_rx(struct net_device *dev)
/* Malloc up new buffer. */
struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_len);
+ skb = netdev_alloc_skb(dev, pkt_len);
if (skb == NULL) {
if (znet_debug)
printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index 9e16f3fa97b2..b9773d229192 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -29,10 +29,6 @@ config IBMVETH
To compile this driver as a module, choose M here. The module will
be called ibmveth.
-config ISERIES_VETH
- tristate "iSeries Virtual Ethernet driver support"
- depends on PPC_ISERIES
-
source "drivers/net/ethernet/ibm/emac/Kconfig"
config EHEA
diff --git a/drivers/net/ethernet/ibm/Makefile b/drivers/net/ethernet/ibm/Makefile
index 5a7d4e9ac803..2f04e71a5926 100644
--- a/drivers/net/ethernet/ibm/Makefile
+++ b/drivers/net/ethernet/ibm/Makefile
@@ -3,6 +3,5 @@
#
obj-$(CONFIG_IBMVETH) += ibmveth.o
-obj-$(CONFIG_ISERIES_VETH) += iseries_veth.o
obj-$(CONFIG_IBM_EMAC) += emac/
obj-$(CONFIG_EHEA) += ehea/
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index 6650068c996c..b8e46cc31e53 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -1,5 +1,5 @@
/*
- * linux/drivers/net/ehea/ehea.h
+ * linux/drivers/net/ethernet/ibm/ehea/ehea.h
*
* eHEA ethernet device driver for IBM eServer System p
*
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 6bdd8e36e564..95837b99a464 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/net/ehea/ehea_ethtool.c
+ * linux/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
*
* eHEA ethernet device driver for IBM eServer System p
*
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_hw.h b/drivers/net/ethernet/ibm/ehea/ehea_hw.h
index 1a2fe4dc3eb3..180d4128a711 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_hw.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_hw.h
@@ -1,5 +1,5 @@
/*
- * linux/drivers/net/ehea/ehea_hw.h
+ * linux/drivers/net/ethernet/ibm/ehea/ehea_hw.h
*
* eHEA ethernet device driver for IBM eServer System p
*
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index e6893cdfd13b..3516e17a399d 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/net/ehea/ehea_main.c
+ * linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
*
* eHEA ethernet device driver for IBM eServer System p
*
@@ -2982,7 +2982,6 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
if (!dev) {
- pr_err("no mem for net_device\n");
ret = -ENOMEM;
goto out_err;
}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.c b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
index 0506967b9044..30f903332e92 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/net/ehea/ehea_phyp.c
+ * linux/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
*
* eHEA ethernet device driver for IBM eServer System p
*
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
index 2f8174c248bc..52c456ec4d6c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
@@ -1,5 +1,5 @@
/*
- * linux/drivers/net/ehea/ehea_phyp.h
+ * linux/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
*
* eHEA ethernet device driver for IBM eServer System p
*
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index c25b05b94daa..4fb47f14dbfe 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/net/ehea/ehea_qmr.c
+ * linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
*
* eHEA ethernet device driver for IBM eServer System p
*
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.h b/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
index 337a47ecf4aa..8e4a70c20ab7 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
@@ -1,5 +1,5 @@
/*
- * linux/drivers/net/ehea/ehea_qmr.h
+ * linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
*
* eHEA ethernet device driver for IBM eServer System p
*
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 2abce965c7bd..a0fe6e3fce61 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1,5 +1,5 @@
/*
- * drivers/net/ibm_newemac/core.c
+ * drivers/net/ethernet/ibm/emac/core.c
*
* Driver for PowerPC 4xx on-chip ethernet controller.
*
@@ -434,6 +434,11 @@ static inline u32 emac_iff2rmr(struct net_device *ndev)
else if (!netdev_mc_empty(ndev))
r |= EMAC_RMR_MAE;
+ if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
+ r &= ~EMAC4_RMR_MJS_MASK;
+ r |= EMAC4_RMR_MJS(ndev->mtu);
+ }
+
return r;
}
@@ -965,6 +970,7 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
int rx_sync_size = emac_rx_sync_size(new_mtu);
int rx_skb_size = emac_rx_skb_size(new_mtu);
int i, ret = 0;
+ int mr1_jumbo_bit_change = 0;
mutex_lock(&dev->link_lock);
emac_netif_stop(dev);
@@ -1013,7 +1019,15 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
}
skip:
/* Check if we need to change "Jumbo" bit in MR1 */
- if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
+ if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
+ mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
+ (dev->ndev->mtu > ETH_DATA_LEN);
+ } else {
+ mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
+ (dev->ndev->mtu > ETH_DATA_LEN);
+ }
+
+ if (mr1_jumbo_bit_change) {
/* This is to prevent starting RX channel in emac_rx_enable() */
set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
@@ -2471,6 +2485,7 @@ static int __devinit emac_init_phy(struct emac_instance *dev)
/* Disable any PHY features not supported by the platform */
dev->phy.def->features &= ~dev->phy_feat_exc;
+ dev->phy.features &= ~dev->phy_feat_exc;
/* Setup initial link parameters */
if (dev->phy.features & SUPPORTED_Autoneg) {
@@ -2568,6 +2583,11 @@ static int __devinit emac_init_config(struct emac_instance *dev)
if (of_device_is_compatible(np, "ibm,emac-405ex") ||
of_device_is_compatible(np, "ibm,emac-405exr"))
dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
+ if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
+ dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
+ EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
+ EMAC_FTR_460EX_PHY_CLK_FIX);
+ }
} else if (of_device_is_compatible(np, "ibm,emac4")) {
dev->features |= EMAC_FTR_EMAC4;
if (of_device_is_compatible(np, "ibm,emac-440gx"))
@@ -2706,11 +2726,9 @@ static int __devinit emac_probe(struct platform_device *ofdev)
/* Allocate our net_device structure */
err = -ENOMEM;
ndev = alloc_etherdev(sizeof(struct emac_instance));
- if (!ndev) {
- printk(KERN_ERR "%s: could not allocate ethernet device!\n",
- np->full_name);
+ if (!ndev)
goto err_gone;
- }
+
dev = netdev_priv(ndev);
dev->ndev = ndev;
dev->ofdev = ofdev;
@@ -2818,6 +2836,13 @@ static int __devinit emac_probe(struct platform_device *ofdev)
dev->stop_timeout = STOP_TIMEOUT_100;
INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
+ /* Some SoCs like APM821xx do not support Half Duplex mode. */
+ if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
+ dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_10baseT_Half);
+ }
+
/* Find PHY if any */
err = emac_init_phy(dev);
if (err != 0)
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index fa3ec57935fa..70074792bdef 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -1,5 +1,5 @@
/*
- * drivers/net/ibm_newemac/core.h
+ * drivers/net/ethernet/ibm/emac/core.h
*
* Driver for PowerPC 4xx on-chip ethernet controller.
*
@@ -325,7 +325,14 @@ struct emac_instance {
* Set if we need phy clock workaround for 460ex or 460gt
*/
#define EMAC_FTR_460EX_PHY_CLK_FIX 0x00000400
-
+/*
+ * APM821xx requires Jumbo frame size set explicitly
+ */
+#define EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE 0x00000800
+/*
+ * APM821xx does not support Half Duplex mode
+ */
+#define EMAC_FTR_APM821XX_NO_HALF_DUPLEX 0x00001000
/* Right now, we don't quite handle the always/possible masks on the
* most optimal way as we don't have a way to say something like
@@ -353,7 +360,9 @@ enum {
EMAC_FTR_NO_FLOW_CONTROL_40x |
#endif
EMAC_FTR_460EX_PHY_CLK_FIX |
- EMAC_FTR_440EP_PHY_CLK_FIX,
+ EMAC_FTR_440EP_PHY_CLK_FIX |
+ EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
+ EMAC_FTR_APM821XX_NO_HALF_DUPLEX,
};
static inline int emac_has_feature(struct emac_instance *dev,
diff --git a/drivers/net/ethernet/ibm/emac/debug.c b/drivers/net/ethernet/ibm/emac/debug.c
index 8c6c1e2a8750..b16b4828b64d 100644
--- a/drivers/net/ethernet/ibm/emac/debug.c
+++ b/drivers/net/ethernet/ibm/emac/debug.c
@@ -1,5 +1,5 @@
/*
- * drivers/net/ibm_newemac/debug.c
+ * drivers/net/ethernet/ibm/emac/debug.c
*
* Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
*
diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h
index 90477fe69d0c..59a92d5870b5 100644
--- a/drivers/net/ethernet/ibm/emac/debug.h
+++ b/drivers/net/ethernet/ibm/emac/debug.h
@@ -1,5 +1,5 @@
/*
- * drivers/net/ibm_newemac/debug.h
+ * drivers/net/ethernet/ibm/emac/debug.h
*
* Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
*
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index 1568278d759a..5afcc27ceebb 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -1,5 +1,5 @@
/*
- * drivers/net/ibm_newemac/emac.h
+ * drivers/net/ethernet/ibm/emac/emac.h
*
 * Register definitions for PowerPC 4xx on-chip ethernet controller
*
@@ -212,6 +212,8 @@ struct emac_regs {
#define EMAC4_RMR_RFAF_64_1024 0x00000006
#define EMAC4_RMR_RFAF_128_2048 0x00000007
#define EMAC4_RMR_BASE EMAC4_RMR_RFAF_128_2048
+#define EMAC4_RMR_MJS_MASK 0x0001fff8
+#define EMAC4_RMR_MJS(s) (((s) << 3) & EMAC4_RMR_MJS_MASK)
/* EMACx_ISR & EMACx_ISER */
#define EMAC4_ISR_TXPE 0x20000000
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index f3c50b97ec61..479e43e2f1ef 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -1,5 +1,5 @@
/*
- * drivers/net/ibm_newemac/mal.c
+ * drivers/net/ethernet/ibm/emac/mal.c
*
* Memory Access Layer (MAL) support
*
diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h
index d06f985bda32..e431a32e3d69 100644
--- a/drivers/net/ethernet/ibm/emac/mal.h
+++ b/drivers/net/ethernet/ibm/emac/mal.h
@@ -1,5 +1,5 @@
/*
- * drivers/net/ibm_newemac/mal.h
+ * drivers/net/ethernet/ibm/emac/mal.h
*
* Memory Access Layer (MAL) support
*
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
index ab4e5969fe65..d3b9d103353e 100644
--- a/drivers/net/ethernet/ibm/emac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -1,5 +1,5 @@
/*
- * drivers/net/ibm_newemac/phy.c
+ * drivers/net/ethernet/ibm/emac/phy.c
*
* Driver for PowerPC 4xx on-chip ethernet controller, PHY support.
* Borrowed from sungem_phy.c, though I only kept the generic MII
diff --git a/drivers/net/ethernet/ibm/emac/phy.h b/drivers/net/ethernet/ibm/emac/phy.h
index 5d2bf4cbe50b..d7e41ec37467 100644
--- a/drivers/net/ethernet/ibm/emac/phy.h
+++ b/drivers/net/ethernet/ibm/emac/phy.h
@@ -1,5 +1,5 @@
/*
- * drivers/net/ibm_newemac/phy.h
+ * drivers/net/ethernet/ibm/emac/phy.h
*
* Driver for PowerPC 4xx on-chip ethernet controller, PHY support
*
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index 4fa53f3def64..d3123282e18e 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -1,5 +1,5 @@
/*
- * drivers/net/ibm_newemac/rgmii.c
+ * drivers/net/ethernet/ibm/emac/rgmii.c
*
* Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support.
*
@@ -237,11 +237,8 @@ static int __devinit rgmii_probe(struct platform_device *ofdev)
rc = -ENOMEM;
dev = kzalloc(sizeof(struct rgmii_instance), GFP_KERNEL);
- if (dev == NULL) {
- printk(KERN_ERR "%s: could not allocate RGMII device!\n",
- np->full_name);
+ if (dev == NULL)
goto err_gone;
- }
mutex_init(&dev->lock);
dev->ofdev = ofdev;
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.h b/drivers/net/ethernet/ibm/emac/rgmii.h
index 9296b6c5f920..668bceeff4a2 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.h
+++ b/drivers/net/ethernet/ibm/emac/rgmii.h
@@ -1,5 +1,5 @@
/*
- * drivers/net/ibm_newemac/rgmii.h
+ * drivers/net/ethernet/ibm/emac/rgmii.h
*
* Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support.
*
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index 5f51bf7c9dc5..872912ef518d 100644
--- a/drivers/net/ethernet/ibm/emac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -1,5 +1,5 @@
/*
- * drivers/net/ibm_newemac/tah.c
+ * drivers/net/ethernet/ibm/emac/tah.c
*
* Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
*
@@ -96,11 +96,8 @@ static int __devinit tah_probe(struct platform_device *ofdev)
rc = -ENOMEM;
dev = kzalloc(sizeof(struct tah_instance), GFP_KERNEL);
- if (dev == NULL) {
- printk(KERN_ERR "%s: could not allocate TAH device!\n",
- np->full_name);
+ if (dev == NULL)
goto err_gone;
- }
mutex_init(&dev->lock);
dev->ofdev = ofdev;
diff --git a/drivers/net/ethernet/ibm/emac/tah.h b/drivers/net/ethernet/ibm/emac/tah.h
index 3437ab4964c7..350b7096a041 100644
--- a/drivers/net/ethernet/ibm/emac/tah.h
+++ b/drivers/net/ethernet/ibm/emac/tah.h
@@ -1,5 +1,5 @@
/*
- * drivers/net/ibm_newemac/tah.h
+ * drivers/net/ethernet/ibm/emac/tah.h
*
* Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
*
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 97449e786d61..415e9b4d5408 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -1,5 +1,5 @@
/*
- * drivers/net/ibm_newemac/zmii.c
+ * drivers/net/ethernet/ibm/emac/zmii.c
*
* Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support.
*
@@ -240,11 +240,8 @@ static int __devinit zmii_probe(struct platform_device *ofdev)
rc = -ENOMEM;
dev = kzalloc(sizeof(struct zmii_instance), GFP_KERNEL);
- if (dev == NULL) {
- printk(KERN_ERR "%s: could not allocate ZMII device!\n",
- np->full_name);
+ if (dev == NULL)
goto err_gone;
- }
mutex_init(&dev->lock);
dev->ofdev = ofdev;
diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h
index ceaed823a83c..455bfb085493 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.h
+++ b/drivers/net/ethernet/ibm/emac/zmii.h
@@ -1,5 +1,5 @@
/*
- * drivers/net/ibm_newemac/zmii.h
+ * drivers/net/ethernet/ibm/emac/zmii.h
*
* Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support.
*
diff --git a/drivers/net/ethernet/ibm/iseries_veth.c b/drivers/net/ethernet/ibm/iseries_veth.c
deleted file mode 100644
index acc31af6594a..000000000000
--- a/drivers/net/ethernet/ibm/iseries_veth.c
+++ /dev/null
@@ -1,1710 +0,0 @@
-/* File veth.c created by Kyle A. Lucke on Mon Aug 7 2000. */
-/*
- * IBM eServer iSeries Virtual Ethernet Device Driver
- * Copyright (C) 2001 Kyle A. Lucke (klucke@us.ibm.com), IBM Corp.
- * Substantially cleaned up by:
- * Copyright (C) 2003 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
- * Copyright (C) 2004-2005 Michael Ellerman, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
- *
- *
- * This module implements the virtual ethernet device for iSeries LPAR
- * Linux. It uses hypervisor message passing to implement an
- * ethernet-like network device communicating between partitions on
- * the iSeries.
- *
- * The iSeries LPAR hypervisor currently allows for up to 16 different
- * virtual ethernets. These are all dynamically configurable on
- * OS/400 partitions, but dynamic configuration is not supported under
- * Linux yet. An ethXX network device will be created for each
- * virtual ethernet this partition is connected to.
- *
- * - This driver is responsible for routing packets to and from other
- * partitions. The MAC addresses used by the virtual ethernets
- * contain meaning and must not be modified.
- *
- * - Having 2 virtual ethernets to the same remote partition DOES NOT
- * double the available bandwidth. The 2 devices will share the
- * available hypervisor bandwidth.
- *
- * - If you send a packet to your own mac address, it will just be
- * dropped, you won't get it on the receive side.
- *
- * - Multicast is implemented by sending the frame to every
- * other partition. It is the responsibility of the receiving
- * partition to filter the addresses desired.
- *
- * Tunable parameters:
- *
- * VETH_NUMBUFFERS: This compile time option defaults to 120. It
- * controls how much memory Linux will allocate per remote partition
- * it is communicating with. It can be thought of as the maximum
- * number of packets outstanding to a remote partition at a time.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/ethtool.h>
-#include <linux/if_ether.h>
-#include <linux/slab.h>
-
-#include <asm/abs_addr.h>
-#include <asm/iseries/mf.h>
-#include <asm/uaccess.h>
-#include <asm/firmware.h>
-#include <asm/iseries/hv_lp_config.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_lp_event.h>
-#include <asm/iommu.h>
-#include <asm/vio.h>
-
-#undef DEBUG
-
-MODULE_AUTHOR("Kyle Lucke <klucke@us.ibm.com>");
-MODULE_DESCRIPTION("iSeries Virtual ethernet driver");
-MODULE_LICENSE("GPL");
-
-#define VETH_EVENT_CAP (0)
-#define VETH_EVENT_FRAMES (1)
-#define VETH_EVENT_MONITOR (2)
-#define VETH_EVENT_FRAMES_ACK (3)
-
-#define VETH_MAX_ACKS_PER_MSG (20)
-#define VETH_MAX_FRAMES_PER_MSG (6)
-
-struct veth_frames_data {
- u32 addr[VETH_MAX_FRAMES_PER_MSG];
- u16 len[VETH_MAX_FRAMES_PER_MSG];
- u32 eofmask;
-};
-#define VETH_EOF_SHIFT (32-VETH_MAX_FRAMES_PER_MSG)
-
-struct veth_frames_ack_data {
- u16 token[VETH_MAX_ACKS_PER_MSG];
-};
-
-struct veth_cap_data {
- u8 caps_version;
- u8 rsvd1;
- u16 num_buffers;
- u16 ack_threshold;
- u16 rsvd2;
- u32 ack_timeout;
- u32 rsvd3;
- u64 rsvd4[3];
-};
-
-struct veth_lpevent {
- struct HvLpEvent base_event;
- union {
- struct veth_cap_data caps_data;
- struct veth_frames_data frames_data;
- struct veth_frames_ack_data frames_ack_data;
- } u;
-
-};
-
-#define DRV_NAME "iseries_veth"
-#define DRV_VERSION "2.0"
-
-#define VETH_NUMBUFFERS (120)
-#define VETH_ACKTIMEOUT (1000000) /* microseconds */
-#define VETH_MAX_MCAST (12)
-
-#define VETH_MAX_MTU (9000)
-
-#if VETH_NUMBUFFERS < 10
-#define ACK_THRESHOLD (1)
-#elif VETH_NUMBUFFERS < 20
-#define ACK_THRESHOLD (4)
-#elif VETH_NUMBUFFERS < 40
-#define ACK_THRESHOLD (10)
-#else
-#define ACK_THRESHOLD (20)
-#endif
-
-#define VETH_STATE_SHUTDOWN (0x0001)
-#define VETH_STATE_OPEN (0x0002)
-#define VETH_STATE_RESET (0x0004)
-#define VETH_STATE_SENTMON (0x0008)
-#define VETH_STATE_SENTCAPS (0x0010)
-#define VETH_STATE_GOTCAPACK (0x0020)
-#define VETH_STATE_GOTCAPS (0x0040)
-#define VETH_STATE_SENTCAPACK (0x0080)
-#define VETH_STATE_READY (0x0100)
-
-struct veth_msg {
- struct veth_msg *next;
- struct veth_frames_data data;
- int token;
- int in_use;
- struct sk_buff *skb;
- struct device *dev;
-};
-
-struct veth_lpar_connection {
- HvLpIndex remote_lp;
- struct delayed_work statemachine_wq;
- struct veth_msg *msgs;
- int num_events;
- struct veth_cap_data local_caps;
-
- struct kobject kobject;
- struct timer_list ack_timer;
-
- struct timer_list reset_timer;
- unsigned int reset_timeout;
- unsigned long last_contact;
- int outstanding_tx;
-
- spinlock_t lock;
- unsigned long state;
- HvLpInstanceId src_inst;
- HvLpInstanceId dst_inst;
- struct veth_lpevent cap_event, cap_ack_event;
- u16 pending_acks[VETH_MAX_ACKS_PER_MSG];
- u32 num_pending_acks;
-
- int num_ack_events;
- struct veth_cap_data remote_caps;
- u32 ack_timeout;
-
- struct veth_msg *msg_stack_head;
-};
-
-struct veth_port {
- struct device *dev;
- u64 mac_addr;
- HvLpIndexMap lpar_map;
-
- /* queue_lock protects the stopped_map and dev's queue. */
- spinlock_t queue_lock;
- HvLpIndexMap stopped_map;
-
- /* mcast_gate protects promiscuous, num_mcast & mcast_addr. */
- rwlock_t mcast_gate;
- int promiscuous;
- int num_mcast;
- u64 mcast_addr[VETH_MAX_MCAST];
-
- struct kobject kobject;
-};
-
-static HvLpIndex this_lp;
-static struct veth_lpar_connection *veth_cnx[HVMAXARCHITECTEDLPS]; /* = 0 */
-static struct net_device *veth_dev[HVMAXARCHITECTEDVIRTUALLANS]; /* = 0 */
-
-static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static void veth_recycle_msg(struct veth_lpar_connection *, struct veth_msg *);
-static void veth_wake_queues(struct veth_lpar_connection *cnx);
-static void veth_stop_queues(struct veth_lpar_connection *cnx);
-static void veth_receive(struct veth_lpar_connection *, struct veth_lpevent *);
-static void veth_release_connection(struct kobject *kobject);
-static void veth_timed_ack(unsigned long ptr);
-static void veth_timed_reset(unsigned long ptr);
-
-/*
- * Utility functions
- */
-
-#define veth_info(fmt, args...) \
- printk(KERN_INFO DRV_NAME ": " fmt, ## args)
-
-#define veth_error(fmt, args...) \
- printk(KERN_ERR DRV_NAME ": Error: " fmt, ## args)
-
-#ifdef DEBUG
-#define veth_debug(fmt, args...) \
- printk(KERN_DEBUG DRV_NAME ": " fmt, ## args)
-#else
-#define veth_debug(fmt, args...) do {} while (0)
-#endif
-
-/* You must hold the connection's lock when you call this function. */
-static inline void veth_stack_push(struct veth_lpar_connection *cnx,
- struct veth_msg *msg)
-{
- msg->next = cnx->msg_stack_head;
- cnx->msg_stack_head = msg;
-}
-
-/* You must hold the connection's lock when you call this function. */
-static inline struct veth_msg *veth_stack_pop(struct veth_lpar_connection *cnx)
-{
- struct veth_msg *msg;
-
- msg = cnx->msg_stack_head;
- if (msg)
- cnx->msg_stack_head = cnx->msg_stack_head->next;
-
- return msg;
-}
-
-/* You must hold the connection's lock when you call this function. */
-static inline int veth_stack_is_empty(struct veth_lpar_connection *cnx)
-{
- return cnx->msg_stack_head == NULL;
-}
-
-static inline HvLpEvent_Rc
-veth_signalevent(struct veth_lpar_connection *cnx, u16 subtype,
- HvLpEvent_AckInd ackind, HvLpEvent_AckType acktype,
- u64 token,
- u64 data1, u64 data2, u64 data3, u64 data4, u64 data5)
-{
- return HvCallEvent_signalLpEventFast(cnx->remote_lp,
- HvLpEvent_Type_VirtualLan,
- subtype, ackind, acktype,
- cnx->src_inst,
- cnx->dst_inst,
- token, data1, data2, data3,
- data4, data5);
-}
-
-static inline HvLpEvent_Rc veth_signaldata(struct veth_lpar_connection *cnx,
- u16 subtype, u64 token, void *data)
-{
- u64 *p = (u64 *) data;
-
- return veth_signalevent(cnx, subtype, HvLpEvent_AckInd_NoAck,
- HvLpEvent_AckType_ImmediateAck,
- token, p[0], p[1], p[2], p[3], p[4]);
-}
-
-struct veth_allocation {
- struct completion c;
- int num;
-};
-
-static void veth_complete_allocation(void *parm, int number)
-{
- struct veth_allocation *vc = (struct veth_allocation *)parm;
-
- vc->num = number;
- complete(&vc->c);
-}
-
-static int veth_allocate_events(HvLpIndex rlp, int number)
-{
- struct veth_allocation vc =
- { COMPLETION_INITIALIZER_ONSTACK(vc.c), 0 };
-
- mf_allocate_lp_events(rlp, HvLpEvent_Type_VirtualLan,
- sizeof(struct veth_lpevent), number,
- &veth_complete_allocation, &vc);
- wait_for_completion(&vc.c);
-
- return vc.num;
-}
-
-/*
- * sysfs support
- */
-
-struct veth_cnx_attribute {
- struct attribute attr;
- ssize_t (*show)(struct veth_lpar_connection *, char *buf);
- ssize_t (*store)(struct veth_lpar_connection *, const char *buf);
-};
-
-static ssize_t veth_cnx_attribute_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct veth_cnx_attribute *cnx_attr;
- struct veth_lpar_connection *cnx;
-
- cnx_attr = container_of(attr, struct veth_cnx_attribute, attr);
- cnx = container_of(kobj, struct veth_lpar_connection, kobject);
-
- if (!cnx_attr->show)
- return -EIO;
-
- return cnx_attr->show(cnx, buf);
-}
-
-#define CUSTOM_CNX_ATTR(_name, _format, _expression) \
-static ssize_t _name##_show(struct veth_lpar_connection *cnx, char *buf)\
-{ \
- return sprintf(buf, _format, _expression); \
-} \
-struct veth_cnx_attribute veth_cnx_attr_##_name = __ATTR_RO(_name)
-
-#define SIMPLE_CNX_ATTR(_name) \
- CUSTOM_CNX_ATTR(_name, "%lu\n", (unsigned long)cnx->_name)
-
-SIMPLE_CNX_ATTR(outstanding_tx);
-SIMPLE_CNX_ATTR(remote_lp);
-SIMPLE_CNX_ATTR(num_events);
-SIMPLE_CNX_ATTR(src_inst);
-SIMPLE_CNX_ATTR(dst_inst);
-SIMPLE_CNX_ATTR(num_pending_acks);
-SIMPLE_CNX_ATTR(num_ack_events);
-CUSTOM_CNX_ATTR(ack_timeout, "%d\n", jiffies_to_msecs(cnx->ack_timeout));
-CUSTOM_CNX_ATTR(reset_timeout, "%d\n", jiffies_to_msecs(cnx->reset_timeout));
-CUSTOM_CNX_ATTR(state, "0x%.4lX\n", cnx->state);
-CUSTOM_CNX_ATTR(last_contact, "%d\n", cnx->last_contact ?
- jiffies_to_msecs(jiffies - cnx->last_contact) : 0);
-
-#define GET_CNX_ATTR(_name) (&veth_cnx_attr_##_name.attr)
-
-static struct attribute *veth_cnx_default_attrs[] = {
- GET_CNX_ATTR(outstanding_tx),
- GET_CNX_ATTR(remote_lp),
- GET_CNX_ATTR(num_events),
- GET_CNX_ATTR(reset_timeout),
- GET_CNX_ATTR(last_contact),
- GET_CNX_ATTR(state),
- GET_CNX_ATTR(src_inst),
- GET_CNX_ATTR(dst_inst),
- GET_CNX_ATTR(num_pending_acks),
- GET_CNX_ATTR(num_ack_events),
- GET_CNX_ATTR(ack_timeout),
- NULL
-};
-
-static const struct sysfs_ops veth_cnx_sysfs_ops = {
- .show = veth_cnx_attribute_show
-};
-
-static struct kobj_type veth_lpar_connection_ktype = {
- .release = veth_release_connection,
- .sysfs_ops = &veth_cnx_sysfs_ops,
- .default_attrs = veth_cnx_default_attrs
-};
-
-struct veth_port_attribute {
- struct attribute attr;
- ssize_t (*show)(struct veth_port *, char *buf);
- ssize_t (*store)(struct veth_port *, const char *buf);
-};
-
-static ssize_t veth_port_attribute_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct veth_port_attribute *port_attr;
- struct veth_port *port;
-
- port_attr = container_of(attr, struct veth_port_attribute, attr);
- port = container_of(kobj, struct veth_port, kobject);
-
- if (!port_attr->show)
- return -EIO;
-
- return port_attr->show(port, buf);
-}
-
-#define CUSTOM_PORT_ATTR(_name, _format, _expression) \
-static ssize_t _name##_show(struct veth_port *port, char *buf) \
-{ \
- return sprintf(buf, _format, _expression); \
-} \
-struct veth_port_attribute veth_port_attr_##_name = __ATTR_RO(_name)
-
-#define SIMPLE_PORT_ATTR(_name) \
- CUSTOM_PORT_ATTR(_name, "%lu\n", (unsigned long)port->_name)
-
-SIMPLE_PORT_ATTR(promiscuous);
-SIMPLE_PORT_ATTR(num_mcast);
-CUSTOM_PORT_ATTR(lpar_map, "0x%X\n", port->lpar_map);
-CUSTOM_PORT_ATTR(stopped_map, "0x%X\n", port->stopped_map);
-CUSTOM_PORT_ATTR(mac_addr, "0x%llX\n", port->mac_addr);
-
-#define GET_PORT_ATTR(_name) (&veth_port_attr_##_name.attr)
-static struct attribute *veth_port_default_attrs[] = {
- GET_PORT_ATTR(mac_addr),
- GET_PORT_ATTR(lpar_map),
- GET_PORT_ATTR(stopped_map),
- GET_PORT_ATTR(promiscuous),
- GET_PORT_ATTR(num_mcast),
- NULL
-};
-
-static const struct sysfs_ops veth_port_sysfs_ops = {
- .show = veth_port_attribute_show
-};
-
-static struct kobj_type veth_port_ktype = {
- .sysfs_ops = &veth_port_sysfs_ops,
- .default_attrs = veth_port_default_attrs
-};
-
-/*
- * LPAR connection code
- */
-
-static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx)
-{
- schedule_delayed_work(&cnx->statemachine_wq, 0);
-}
-
-static void veth_take_cap(struct veth_lpar_connection *cnx,
- struct veth_lpevent *event)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cnx->lock, flags);
- /* Receiving caps may mean the other end has just come up, so
- * we need to reload the instance ID of the far end */
- cnx->dst_inst =
- HvCallEvent_getTargetLpInstanceId(cnx->remote_lp,
- HvLpEvent_Type_VirtualLan);
-
- if (cnx->state & VETH_STATE_GOTCAPS) {
- veth_error("Received a second capabilities from LPAR %d.\n",
- cnx->remote_lp);
- event->base_event.xRc = HvLpEvent_Rc_BufferNotAvailable;
- HvCallEvent_ackLpEvent((struct HvLpEvent *) event);
- } else {
- memcpy(&cnx->cap_event, event, sizeof(cnx->cap_event));
- cnx->state |= VETH_STATE_GOTCAPS;
- veth_kick_statemachine(cnx);
- }
- spin_unlock_irqrestore(&cnx->lock, flags);
-}
-
-static void veth_take_cap_ack(struct veth_lpar_connection *cnx,
- struct veth_lpevent *event)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cnx->lock, flags);
- if (cnx->state & VETH_STATE_GOTCAPACK) {
- veth_error("Received a second capabilities ack from LPAR %d.\n",
- cnx->remote_lp);
- } else {
- memcpy(&cnx->cap_ack_event, event,
- sizeof(cnx->cap_ack_event));
- cnx->state |= VETH_STATE_GOTCAPACK;
- veth_kick_statemachine(cnx);
- }
- spin_unlock_irqrestore(&cnx->lock, flags);
-}
-
-static void veth_take_monitor_ack(struct veth_lpar_connection *cnx,
- struct veth_lpevent *event)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cnx->lock, flags);
- veth_debug("cnx %d: lost connection.\n", cnx->remote_lp);
-
- /* Avoid kicking the statemachine once we're shutdown.
- * It's unnecessary and it could break veth_stop_connection(). */
-
- if (! (cnx->state & VETH_STATE_SHUTDOWN)) {
- cnx->state |= VETH_STATE_RESET;
- veth_kick_statemachine(cnx);
- }
- spin_unlock_irqrestore(&cnx->lock, flags);
-}
-
-static void veth_handle_ack(struct veth_lpevent *event)
-{
- HvLpIndex rlp = event->base_event.xTargetLp;
- struct veth_lpar_connection *cnx = veth_cnx[rlp];
-
- BUG_ON(! cnx);
-
- switch (event->base_event.xSubtype) {
- case VETH_EVENT_CAP:
- veth_take_cap_ack(cnx, event);
- break;
- case VETH_EVENT_MONITOR:
- veth_take_monitor_ack(cnx, event);
- break;
- default:
- veth_error("Unknown ack type %d from LPAR %d.\n",
- event->base_event.xSubtype, rlp);
- }
-}
-
-static void veth_handle_int(struct veth_lpevent *event)
-{
- HvLpIndex rlp = event->base_event.xSourceLp;
- struct veth_lpar_connection *cnx = veth_cnx[rlp];
- unsigned long flags;
- int i, acked = 0;
-
- BUG_ON(! cnx);
-
- switch (event->base_event.xSubtype) {
- case VETH_EVENT_CAP:
- veth_take_cap(cnx, event);
- break;
- case VETH_EVENT_MONITOR:
- /* do nothing... this'll hang out here til we're dead,
- * and the hypervisor will return it for us. */
- break;
- case VETH_EVENT_FRAMES_ACK:
- spin_lock_irqsave(&cnx->lock, flags);
-
- for (i = 0; i < VETH_MAX_ACKS_PER_MSG; ++i) {
- u16 msgnum = event->u.frames_ack_data.token[i];
-
- if (msgnum < VETH_NUMBUFFERS) {
- veth_recycle_msg(cnx, cnx->msgs + msgnum);
- cnx->outstanding_tx--;
- acked++;
- }
- }
-
- if (acked > 0) {
- cnx->last_contact = jiffies;
- veth_wake_queues(cnx);
- }
-
- spin_unlock_irqrestore(&cnx->lock, flags);
- break;
- case VETH_EVENT_FRAMES:
- veth_receive(cnx, event);
- break;
- default:
- veth_error("Unknown interrupt type %d from LPAR %d.\n",
- event->base_event.xSubtype, rlp);
- }
-}
-
-static void veth_handle_event(struct HvLpEvent *event)
-{
- struct veth_lpevent *veth_event = (struct veth_lpevent *)event;
-
- if (hvlpevent_is_ack(event))
- veth_handle_ack(veth_event);
- else
- veth_handle_int(veth_event);
-}
-
-static int veth_process_caps(struct veth_lpar_connection *cnx)
-{
- struct veth_cap_data *remote_caps = &cnx->remote_caps;
- int num_acks_needed;
-
- /* Convert timer to jiffies */
- cnx->ack_timeout = remote_caps->ack_timeout * HZ / 1000000;
-
- if ( (remote_caps->num_buffers == 0) ||
- (remote_caps->ack_threshold > VETH_MAX_ACKS_PER_MSG) ||
- (remote_caps->ack_threshold == 0) ||
- (cnx->ack_timeout == 0) ) {
- veth_error("Received incompatible capabilities from LPAR %d.\n",
- cnx->remote_lp);
- return HvLpEvent_Rc_InvalidSubtypeData;
- }
-
- num_acks_needed = (remote_caps->num_buffers
- / remote_caps->ack_threshold) + 1;
-
- /* FIXME: locking on num_ack_events? */
- if (cnx->num_ack_events < num_acks_needed) {
- int num;
-
- num = veth_allocate_events(cnx->remote_lp,
- num_acks_needed-cnx->num_ack_events);
- if (num > 0)
- cnx->num_ack_events += num;
-
- if (cnx->num_ack_events < num_acks_needed) {
- veth_error("Couldn't allocate enough ack events "
- "for LPAR %d.\n", cnx->remote_lp);
-
- return HvLpEvent_Rc_BufferNotAvailable;
- }
- }
-
-
- return HvLpEvent_Rc_Good;
-}
-
-/* FIXME: The gotos here are a bit dubious */
-static void veth_statemachine(struct work_struct *work)
-{
- struct veth_lpar_connection *cnx =
- container_of(work, struct veth_lpar_connection,
- statemachine_wq.work);
- int rlp = cnx->remote_lp;
- int rc;
-
- spin_lock_irq(&cnx->lock);
-
- restart:
- if (cnx->state & VETH_STATE_RESET) {
- if (cnx->state & VETH_STATE_OPEN)
- HvCallEvent_closeLpEventPath(cnx->remote_lp,
- HvLpEvent_Type_VirtualLan);
-
- /*
- * Reset ack data. This prevents the ack_timer actually
- * doing anything, even if it runs one more time when
- * we drop the lock below.
- */
- memset(&cnx->pending_acks, 0xff, sizeof (cnx->pending_acks));
- cnx->num_pending_acks = 0;
-
- cnx->state &= ~(VETH_STATE_RESET | VETH_STATE_SENTMON
- | VETH_STATE_OPEN | VETH_STATE_SENTCAPS
- | VETH_STATE_GOTCAPACK | VETH_STATE_GOTCAPS
- | VETH_STATE_SENTCAPACK | VETH_STATE_READY);
-
- /* Clean up any leftover messages */
- if (cnx->msgs) {
- int i;
- for (i = 0; i < VETH_NUMBUFFERS; ++i)
- veth_recycle_msg(cnx, cnx->msgs + i);
- }
-
- cnx->outstanding_tx = 0;
- veth_wake_queues(cnx);
-
- /* Drop the lock so we can do stuff that might sleep or
- * take other locks. */
- spin_unlock_irq(&cnx->lock);
-
- del_timer_sync(&cnx->ack_timer);
- del_timer_sync(&cnx->reset_timer);
-
- spin_lock_irq(&cnx->lock);
-
- if (cnx->state & VETH_STATE_RESET)
- goto restart;
-
- /* Hack, wait for the other end to reset itself. */
- if (! (cnx->state & VETH_STATE_SHUTDOWN)) {
- schedule_delayed_work(&cnx->statemachine_wq, 5 * HZ);
- goto out;
- }
- }
-
- if (cnx->state & VETH_STATE_SHUTDOWN)
- /* It's all over, do nothing */
- goto out;
-
- if ( !(cnx->state & VETH_STATE_OPEN) ) {
- if (! cnx->msgs || (cnx->num_events < (2 + VETH_NUMBUFFERS)) )
- goto cant_cope;
-
- HvCallEvent_openLpEventPath(rlp, HvLpEvent_Type_VirtualLan);
- cnx->src_inst =
- HvCallEvent_getSourceLpInstanceId(rlp,
- HvLpEvent_Type_VirtualLan);
- cnx->dst_inst =
- HvCallEvent_getTargetLpInstanceId(rlp,
- HvLpEvent_Type_VirtualLan);
- cnx->state |= VETH_STATE_OPEN;
- }
-
- if ( (cnx->state & VETH_STATE_OPEN) &&
- !(cnx->state & VETH_STATE_SENTMON) ) {
- rc = veth_signalevent(cnx, VETH_EVENT_MONITOR,
- HvLpEvent_AckInd_DoAck,
- HvLpEvent_AckType_DeferredAck,
- 0, 0, 0, 0, 0, 0);
-
- if (rc == HvLpEvent_Rc_Good) {
- cnx->state |= VETH_STATE_SENTMON;
- } else {
- if ( (rc != HvLpEvent_Rc_PartitionDead) &&
- (rc != HvLpEvent_Rc_PathClosed) )
- veth_error("Error sending monitor to LPAR %d, "
- "rc = %d\n", rlp, rc);
-
- /* Oh well, hope we get a cap from the other
- * end and do better when that kicks us */
- goto out;
- }
- }
-
- if ( (cnx->state & VETH_STATE_OPEN) &&
- !(cnx->state & VETH_STATE_SENTCAPS)) {
- u64 *rawcap = (u64 *)&cnx->local_caps;
-
- rc = veth_signalevent(cnx, VETH_EVENT_CAP,
- HvLpEvent_AckInd_DoAck,
- HvLpEvent_AckType_ImmediateAck,
- 0, rawcap[0], rawcap[1], rawcap[2],
- rawcap[3], rawcap[4]);
-
- if (rc == HvLpEvent_Rc_Good) {
- cnx->state |= VETH_STATE_SENTCAPS;
- } else {
- if ( (rc != HvLpEvent_Rc_PartitionDead) &&
- (rc != HvLpEvent_Rc_PathClosed) )
- veth_error("Error sending caps to LPAR %d, "
- "rc = %d\n", rlp, rc);
-
- /* Oh well, hope we get a cap from the other
- * end and do better when that kicks us */
- goto out;
- }
- }
-
- if ((cnx->state & VETH_STATE_GOTCAPS) &&
- !(cnx->state & VETH_STATE_SENTCAPACK)) {
- struct veth_cap_data *remote_caps = &cnx->remote_caps;
-
- memcpy(remote_caps, &cnx->cap_event.u.caps_data,
- sizeof(*remote_caps));
-
- spin_unlock_irq(&cnx->lock);
- rc = veth_process_caps(cnx);
- spin_lock_irq(&cnx->lock);
-
- /* We dropped the lock, so recheck for anything which
- * might mess us up */
- if (cnx->state & (VETH_STATE_RESET|VETH_STATE_SHUTDOWN))
- goto restart;
-
- cnx->cap_event.base_event.xRc = rc;
- HvCallEvent_ackLpEvent((struct HvLpEvent *)&cnx->cap_event);
- if (rc == HvLpEvent_Rc_Good)
- cnx->state |= VETH_STATE_SENTCAPACK;
- else
- goto cant_cope;
- }
-
- if ((cnx->state & VETH_STATE_GOTCAPACK) &&
- (cnx->state & VETH_STATE_GOTCAPS) &&
- !(cnx->state & VETH_STATE_READY)) {
- if (cnx->cap_ack_event.base_event.xRc == HvLpEvent_Rc_Good) {
- /* Start the ACK timer */
- cnx->ack_timer.expires = jiffies + cnx->ack_timeout;
- add_timer(&cnx->ack_timer);
- cnx->state |= VETH_STATE_READY;
- } else {
- veth_error("Caps rejected by LPAR %d, rc = %d\n",
- rlp, cnx->cap_ack_event.base_event.xRc);
- goto cant_cope;
- }
- }
-
- out:
- spin_unlock_irq(&cnx->lock);
- return;
-
- cant_cope:
- /* FIXME: we get here if something happens we really can't
- * cope with. The link will never work once we get here, and
- * all we can do is not lock the rest of the system up */
- veth_error("Unrecoverable error on connection to LPAR %d, shutting down"
- " (state = 0x%04lx)\n", rlp, cnx->state);
- cnx->state |= VETH_STATE_SHUTDOWN;
- spin_unlock_irq(&cnx->lock);
-}
-
-static int veth_init_connection(u8 rlp)
-{
- struct veth_lpar_connection *cnx;
- struct veth_msg *msgs;
- int i;
-
- if ( (rlp == this_lp) ||
- ! HvLpConfig_doLpsCommunicateOnVirtualLan(this_lp, rlp) )
- return 0;
-
- cnx = kzalloc(sizeof(*cnx), GFP_KERNEL);
- if (! cnx)
- return -ENOMEM;
-
- cnx->remote_lp = rlp;
- spin_lock_init(&cnx->lock);
- INIT_DELAYED_WORK(&cnx->statemachine_wq, veth_statemachine);
-
- init_timer(&cnx->ack_timer);
- cnx->ack_timer.function = veth_timed_ack;
- cnx->ack_timer.data = (unsigned long) cnx;
-
- init_timer(&cnx->reset_timer);
- cnx->reset_timer.function = veth_timed_reset;
- cnx->reset_timer.data = (unsigned long) cnx;
- cnx->reset_timeout = 5 * HZ * (VETH_ACKTIMEOUT / 1000000);
-
- memset(&cnx->pending_acks, 0xff, sizeof (cnx->pending_acks));
-
- veth_cnx[rlp] = cnx;
-
- /* This gets us 1 reference, which is held on behalf of the driver
- * infrastructure. It's released at module unload. */
- kobject_init(&cnx->kobject, &veth_lpar_connection_ktype);
-
- msgs = kcalloc(VETH_NUMBUFFERS, sizeof(struct veth_msg), GFP_KERNEL);
- if (! msgs) {
- veth_error("Can't allocate buffers for LPAR %d.\n", rlp);
- return -ENOMEM;
- }
-
- cnx->msgs = msgs;
-
- for (i = 0; i < VETH_NUMBUFFERS; i++) {
- msgs[i].token = i;
- veth_stack_push(cnx, msgs + i);
- }
-
- cnx->num_events = veth_allocate_events(rlp, 2 + VETH_NUMBUFFERS);
-
- if (cnx->num_events < (2 + VETH_NUMBUFFERS)) {
- veth_error("Can't allocate enough events for LPAR %d.\n", rlp);
- return -ENOMEM;
- }
-
- cnx->local_caps.num_buffers = VETH_NUMBUFFERS;
- cnx->local_caps.ack_threshold = ACK_THRESHOLD;
- cnx->local_caps.ack_timeout = VETH_ACKTIMEOUT;
-
- return 0;
-}
-
-static void veth_stop_connection(struct veth_lpar_connection *cnx)
-{
- if (!cnx)
- return;
-
- spin_lock_irq(&cnx->lock);
- cnx->state |= VETH_STATE_RESET | VETH_STATE_SHUTDOWN;
- veth_kick_statemachine(cnx);
- spin_unlock_irq(&cnx->lock);
-
- /* ensure the statemachine runs now and waits for its completion */
- flush_delayed_work_sync(&cnx->statemachine_wq);
-}
-
-static void veth_destroy_connection(struct veth_lpar_connection *cnx)
-{
- if (!cnx)
- return;
-
- if (cnx->num_events > 0)
- mf_deallocate_lp_events(cnx->remote_lp,
- HvLpEvent_Type_VirtualLan,
- cnx->num_events,
- NULL, NULL);
- if (cnx->num_ack_events > 0)
- mf_deallocate_lp_events(cnx->remote_lp,
- HvLpEvent_Type_VirtualLan,
- cnx->num_ack_events,
- NULL, NULL);
-
- kfree(cnx->msgs);
- veth_cnx[cnx->remote_lp] = NULL;
- kfree(cnx);
-}
-
-static void veth_release_connection(struct kobject *kobj)
-{
- struct veth_lpar_connection *cnx;
- cnx = container_of(kobj, struct veth_lpar_connection, kobject);
- veth_stop_connection(cnx);
- veth_destroy_connection(cnx);
-}
-
-/*
- * net_device code
- */
-
-static int veth_open(struct net_device *dev)
-{
- netif_start_queue(dev);
- return 0;
-}
-
-static int veth_close(struct net_device *dev)
-{
- netif_stop_queue(dev);
- return 0;
-}
-
-static int veth_change_mtu(struct net_device *dev, int new_mtu)
-{
- if ((new_mtu < 68) || (new_mtu > VETH_MAX_MTU))
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-
-static void veth_set_multicast_list(struct net_device *dev)
-{
- struct veth_port *port = netdev_priv(dev);
- unsigned long flags;
-
- write_lock_irqsave(&port->mcast_gate, flags);
-
- if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
- (netdev_mc_count(dev) > VETH_MAX_MCAST)) {
- port->promiscuous = 1;
- } else {
- struct netdev_hw_addr *ha;
-
- port->promiscuous = 0;
-
- /* Update table */
- port->num_mcast = 0;
-
- netdev_for_each_mc_addr(ha, dev) {
- u8 *addr = ha->addr;
- u64 xaddr = 0;
-
- memcpy(&xaddr, addr, ETH_ALEN);
- port->mcast_addr[port->num_mcast] = xaddr;
- port->num_mcast++;
- }
- }
-
- write_unlock_irqrestore(&port->mcast_gate, flags);
-}
-
-static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- strncpy(info->driver, DRV_NAME, sizeof(info->driver) - 1);
- info->driver[sizeof(info->driver) - 1] = '\0';
- strncpy(info->version, DRV_VERSION, sizeof(info->version) - 1);
- info->version[sizeof(info->version) - 1] = '\0';
-}
-
-static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
-{
- ecmd->supported = (SUPPORTED_1000baseT_Full
- | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
- ecmd->advertising = (SUPPORTED_1000baseT_Full
- | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->phy_address = 0;
- ecmd->speed = SPEED_1000;
- ecmd->duplex = DUPLEX_FULL;
- ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->maxtxpkt = 120;
- ecmd->maxrxpkt = 120;
- return 0;
-}
-
-static const struct ethtool_ops ops = {
- .get_drvinfo = veth_get_drvinfo,
- .get_settings = veth_get_settings,
- .get_link = ethtool_op_get_link,
-};
-
-static const struct net_device_ops veth_netdev_ops = {
- .ndo_open = veth_open,
- .ndo_stop = veth_close,
- .ndo_start_xmit = veth_start_xmit,
- .ndo_change_mtu = veth_change_mtu,
- .ndo_set_rx_mode = veth_set_multicast_list,
- .ndo_set_mac_address = NULL,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static struct net_device *veth_probe_one(int vlan,
- struct vio_dev *vio_dev)
-{
- struct net_device *dev;
- struct veth_port *port;
- struct device *vdev = &vio_dev->dev;
- int i, rc;
- const unsigned char *mac_addr;
-
- mac_addr = vio_get_attribute(vio_dev, "local-mac-address", NULL);
- if (mac_addr == NULL)
- mac_addr = vio_get_attribute(vio_dev, "mac-address", NULL);
- if (mac_addr == NULL) {
- veth_error("Unable to fetch MAC address from device tree.\n");
- return NULL;
- }
-
- dev = alloc_etherdev(sizeof (struct veth_port));
- if (! dev) {
- veth_error("Unable to allocate net_device structure!\n");
- return NULL;
- }
-
- port = netdev_priv(dev);
-
- spin_lock_init(&port->queue_lock);
- rwlock_init(&port->mcast_gate);
- port->stopped_map = 0;
-
- for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
- HvLpVirtualLanIndexMap map;
-
- if (i == this_lp)
- continue;
- map = HvLpConfig_getVirtualLanIndexMapForLp(i);
- if (map & (0x8000 >> vlan))
- port->lpar_map |= (1 << i);
- }
- port->dev = vdev;
-
- memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
-
- dev->mtu = VETH_MAX_MTU;
-
- memcpy(&port->mac_addr, mac_addr, ETH_ALEN);
-
- dev->netdev_ops = &veth_netdev_ops;
- SET_ETHTOOL_OPS(dev, &ops);
-
- SET_NETDEV_DEV(dev, vdev);
-
- rc = register_netdev(dev);
- if (rc != 0) {
- veth_error("Failed registering net device for vlan%d.\n", vlan);
- free_netdev(dev);
- return NULL;
- }
-
- kobject_init(&port->kobject, &veth_port_ktype);
- if (0 != kobject_add(&port->kobject, &dev->dev.kobj, "veth_port"))
- veth_error("Failed adding port for %s to sysfs.\n", dev->name);
-
- veth_info("%s attached to iSeries vlan %d (LPAR map = 0x%.4X)\n",
- dev->name, vlan, port->lpar_map);
-
- return dev;
-}
-
-/*
- * Tx path
- */
-
-static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
- struct net_device *dev)
-{
- struct veth_lpar_connection *cnx = veth_cnx[rlp];
- struct veth_port *port = netdev_priv(dev);
- HvLpEvent_Rc rc;
- struct veth_msg *msg = NULL;
- unsigned long flags;
-
- if (! cnx)
- return 0;
-
- spin_lock_irqsave(&cnx->lock, flags);
-
- if (! (cnx->state & VETH_STATE_READY))
- goto no_error;
-
- if ((skb->len - ETH_HLEN) > VETH_MAX_MTU)
- goto drop;
-
- msg = veth_stack_pop(cnx);
- if (! msg)
- goto drop;
-
- msg->in_use = 1;
- msg->skb = skb_get(skb);
-
- msg->data.addr[0] = dma_map_single(port->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
-
- if (dma_mapping_error(port->dev, msg->data.addr[0]))
- goto recycle_and_drop;
-
- msg->dev = port->dev;
- msg->data.len[0] = skb->len;
- msg->data.eofmask = 1 << VETH_EOF_SHIFT;
-
- rc = veth_signaldata(cnx, VETH_EVENT_FRAMES, msg->token, &msg->data);
-
- if (rc != HvLpEvent_Rc_Good)
- goto recycle_and_drop;
-
- /* If the timer's not already running, start it now. */
- if (0 == cnx->outstanding_tx)
- mod_timer(&cnx->reset_timer, jiffies + cnx->reset_timeout);
-
- cnx->last_contact = jiffies;
- cnx->outstanding_tx++;
-
- if (veth_stack_is_empty(cnx))
- veth_stop_queues(cnx);
-
- no_error:
- spin_unlock_irqrestore(&cnx->lock, flags);
- return 0;
-
- recycle_and_drop:
- veth_recycle_msg(cnx, msg);
- drop:
- spin_unlock_irqrestore(&cnx->lock, flags);
- return 1;
-}
-
-static void veth_transmit_to_many(struct sk_buff *skb,
- HvLpIndexMap lpmask,
- struct net_device *dev)
-{
- int i, success, error;
-
- success = error = 0;
-
- for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
- if ((lpmask & (1 << i)) == 0)
- continue;
-
- if (veth_transmit_to_one(skb, i, dev))
- error = 1;
- else
- success = 1;
- }
-
- if (error)
- dev->stats.tx_errors++;
-
- if (success) {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
- }
-}
-
-static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- unsigned char *frame = skb->data;
- struct veth_port *port = netdev_priv(dev);
- HvLpIndexMap lpmask;
-
- if (is_unicast_ether_addr(frame)) {
- /* unicast packet */
- HvLpIndex rlp = frame[5];
-
- if ( ! ((1 << rlp) & port->lpar_map) ) {
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- lpmask = 1 << rlp;
- } else {
- lpmask = port->lpar_map;
- }
-
- veth_transmit_to_many(skb, lpmask, dev);
-
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
-
-/* You must hold the connection's lock when you call this function. */
-static void veth_recycle_msg(struct veth_lpar_connection *cnx,
- struct veth_msg *msg)
-{
- u32 dma_address, dma_length;
-
- if (msg->in_use) {
- msg->in_use = 0;
- dma_address = msg->data.addr[0];
- dma_length = msg->data.len[0];
-
- if (!dma_mapping_error(msg->dev, dma_address))
- dma_unmap_single(msg->dev, dma_address, dma_length,
- DMA_TO_DEVICE);
-
- if (msg->skb) {
- dev_kfree_skb_any(msg->skb);
- msg->skb = NULL;
- }
-
- memset(&msg->data, 0, sizeof(msg->data));
- veth_stack_push(cnx, msg);
- } else if (cnx->state & VETH_STATE_OPEN) {
- veth_error("Non-pending frame (# %d) acked by LPAR %d.\n",
- cnx->remote_lp, msg->token);
- }
-}
-
-static void veth_wake_queues(struct veth_lpar_connection *cnx)
-{
- int i;
-
- for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
- struct net_device *dev = veth_dev[i];
- struct veth_port *port;
- unsigned long flags;
-
- if (! dev)
- continue;
-
- port = netdev_priv(dev);
-
- if (! (port->lpar_map & (1<<cnx->remote_lp)))
- continue;
-
- spin_lock_irqsave(&port->queue_lock, flags);
-
- port->stopped_map &= ~(1 << cnx->remote_lp);
-
- if (0 == port->stopped_map && netif_queue_stopped(dev)) {
- veth_debug("cnx %d: woke queue for %s.\n",
- cnx->remote_lp, dev->name);
- netif_wake_queue(dev);
- }
- spin_unlock_irqrestore(&port->queue_lock, flags);
- }
-}
-
-static void veth_stop_queues(struct veth_lpar_connection *cnx)
-{
- int i;
-
- for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
- struct net_device *dev = veth_dev[i];
- struct veth_port *port;
-
- if (! dev)
- continue;
-
- port = netdev_priv(dev);
-
- /* If this cnx is not on the vlan for this port, continue */
- if (! (port->lpar_map & (1 << cnx->remote_lp)))
- continue;
-
- spin_lock(&port->queue_lock);
-
- netif_stop_queue(dev);
- port->stopped_map |= (1 << cnx->remote_lp);
-
- veth_debug("cnx %d: stopped queue for %s, map = 0x%x.\n",
- cnx->remote_lp, dev->name, port->stopped_map);
-
- spin_unlock(&port->queue_lock);
- }
-}
-
-static void veth_timed_reset(unsigned long ptr)
-{
- struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)ptr;
- unsigned long trigger_time, flags;
-
- /* FIXME is it possible this fires after veth_stop_connection()?
- * That would reschedule the statemachine for 5 seconds and probably
- * execute it after the module's been unloaded. Hmm. */
-
- spin_lock_irqsave(&cnx->lock, flags);
-
- if (cnx->outstanding_tx > 0) {
- trigger_time = cnx->last_contact + cnx->reset_timeout;
-
- if (trigger_time < jiffies) {
- cnx->state |= VETH_STATE_RESET;
- veth_kick_statemachine(cnx);
- veth_error("%d packets not acked by LPAR %d within %d "
- "seconds, resetting.\n",
- cnx->outstanding_tx, cnx->remote_lp,
- cnx->reset_timeout / HZ);
- } else {
- /* Reschedule the timer */
- trigger_time = jiffies + cnx->reset_timeout;
- mod_timer(&cnx->reset_timer, trigger_time);
- }
- }
-
- spin_unlock_irqrestore(&cnx->lock, flags);
-}
-
-/*
- * Rx path
- */
-
-static inline int veth_frame_wanted(struct veth_port *port, u64 mac_addr)
-{
- int wanted = 0;
- int i;
- unsigned long flags;
-
- if ( (mac_addr == port->mac_addr) || (mac_addr == 0xffffffffffff0000) )
- return 1;
-
- read_lock_irqsave(&port->mcast_gate, flags);
-
- if (port->promiscuous) {
- wanted = 1;
- goto out;
- }
-
- for (i = 0; i < port->num_mcast; ++i) {
- if (port->mcast_addr[i] == mac_addr) {
- wanted = 1;
- break;
- }
- }
-
- out:
- read_unlock_irqrestore(&port->mcast_gate, flags);
-
- return wanted;
-}
-
-struct dma_chunk {
- u64 addr;
- u64 size;
-};
-
-#define VETH_MAX_PAGES_PER_FRAME ( (VETH_MAX_MTU+PAGE_SIZE-2)/PAGE_SIZE + 1 )
-
-static inline void veth_build_dma_list(struct dma_chunk *list,
- unsigned char *p, unsigned long length)
-{
- unsigned long done;
- int i = 1;
-
- /* FIXME: skbs are contiguous in real addresses. Do we
- * really need to break it into PAGE_SIZE chunks, or can we do
- * it just at the granularity of iSeries real->absolute
- * mapping? Indeed, given the way the allocator works, can we
- * count on them being absolutely contiguous? */
- list[0].addr = iseries_hv_addr(p);
- list[0].size = min(length,
- PAGE_SIZE - ((unsigned long)p & ~PAGE_MASK));
-
- done = list[0].size;
- while (done < length) {
- list[i].addr = iseries_hv_addr(p + done);
- list[i].size = min(length-done, PAGE_SIZE);
- done += list[i].size;
- i++;
- }
-}
-
-static void veth_flush_acks(struct veth_lpar_connection *cnx)
-{
- HvLpEvent_Rc rc;
-
- rc = veth_signaldata(cnx, VETH_EVENT_FRAMES_ACK,
- 0, &cnx->pending_acks);
-
- if (rc != HvLpEvent_Rc_Good)
- veth_error("Failed acking frames from LPAR %d, rc = %d\n",
- cnx->remote_lp, (int)rc);
-
- cnx->num_pending_acks = 0;
- memset(&cnx->pending_acks, 0xff, sizeof(cnx->pending_acks));
-}
-
-static void veth_receive(struct veth_lpar_connection *cnx,
- struct veth_lpevent *event)
-{
- struct veth_frames_data *senddata = &event->u.frames_data;
- int startchunk = 0;
- int nchunks;
- unsigned long flags;
- HvLpDma_Rc rc;
-
- do {
- u16 length = 0;
- struct sk_buff *skb;
- struct dma_chunk local_list[VETH_MAX_PAGES_PER_FRAME];
- struct dma_chunk remote_list[VETH_MAX_FRAMES_PER_MSG];
- u64 dest;
- HvLpVirtualLanIndex vlan;
- struct net_device *dev;
- struct veth_port *port;
-
- /* FIXME: do we need this? */
- memset(local_list, 0, sizeof(local_list));
- memset(remote_list, 0, sizeof(remote_list));
-
- /* a 0 address marks the end of the valid entries */
- if (senddata->addr[startchunk] == 0)
- break;
-
- /* make sure that we have at least 1 EOF entry in the
- * remaining entries */
- if (! (senddata->eofmask >> (startchunk + VETH_EOF_SHIFT))) {
- veth_error("Missing EOF fragment in event "
- "eofmask = 0x%x startchunk = %d\n",
- (unsigned)senddata->eofmask,
- startchunk);
- break;
- }
-
- /* build list of chunks in this frame */
- nchunks = 0;
- do {
- remote_list[nchunks].addr =
- (u64) senddata->addr[startchunk+nchunks] << 32;
- remote_list[nchunks].size =
- senddata->len[startchunk+nchunks];
- length += remote_list[nchunks].size;
- } while (! (senddata->eofmask &
- (1 << (VETH_EOF_SHIFT + startchunk + nchunks++))));
-
- /* length == total length of all chunks */
- /* nchunks == # of chunks in this frame */
-
- if ((length - ETH_HLEN) > VETH_MAX_MTU) {
- veth_error("Received oversize frame from LPAR %d "
- "(length = %d)\n",
- cnx->remote_lp, length);
- continue;
- }
-
- skb = alloc_skb(length, GFP_ATOMIC);
- if (!skb)
- continue;
-
- veth_build_dma_list(local_list, skb->data, length);
-
- rc = HvCallEvent_dmaBufList(HvLpEvent_Type_VirtualLan,
- event->base_event.xSourceLp,
- HvLpDma_Direction_RemoteToLocal,
- cnx->src_inst,
- cnx->dst_inst,
- HvLpDma_AddressType_RealAddress,
- HvLpDma_AddressType_TceIndex,
- iseries_hv_addr(&local_list),
- iseries_hv_addr(&remote_list),
- length);
- if (rc != HvLpDma_Rc_Good) {
- dev_kfree_skb_irq(skb);
- continue;
- }
-
- vlan = skb->data[9];
- dev = veth_dev[vlan];
- if (! dev) {
- /*
- * Some earlier versions of the driver sent
- * broadcasts down all connections, even to lpars
- * that weren't on the relevant vlan. So ignore
- * packets belonging to a vlan we're not on.
- * We can also be here if we receive packets while
- * the driver is going down, because then dev is NULL.
- */
- dev_kfree_skb_irq(skb);
- continue;
- }
-
- port = netdev_priv(dev);
- dest = *((u64 *) skb->data) & 0xFFFFFFFFFFFF0000;
-
- if ((vlan > HVMAXARCHITECTEDVIRTUALLANS) || !port) {
- dev_kfree_skb_irq(skb);
- continue;
- }
- if (! veth_frame_wanted(port, dest)) {
- dev_kfree_skb_irq(skb);
- continue;
- }
-
- skb_put(skb, length);
- skb->protocol = eth_type_trans(skb, dev);
- skb_checksum_none_assert(skb);
- netif_rx(skb); /* send it up */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += length;
- } while (startchunk += nchunks, startchunk < VETH_MAX_FRAMES_PER_MSG);
-
- /* Ack it */
- spin_lock_irqsave(&cnx->lock, flags);
- BUG_ON(cnx->num_pending_acks > VETH_MAX_ACKS_PER_MSG);
-
- cnx->pending_acks[cnx->num_pending_acks++] =
- event->base_event.xCorrelationToken;
-
- if ( (cnx->num_pending_acks >= cnx->remote_caps.ack_threshold) ||
- (cnx->num_pending_acks >= VETH_MAX_ACKS_PER_MSG) )
- veth_flush_acks(cnx);
-
- spin_unlock_irqrestore(&cnx->lock, flags);
-}
-
-static void veth_timed_ack(unsigned long ptr)
-{
- struct veth_lpar_connection *cnx = (struct veth_lpar_connection *) ptr;
- unsigned long flags;
-
- /* Ack all the events */
- spin_lock_irqsave(&cnx->lock, flags);
- if (cnx->num_pending_acks > 0)
- veth_flush_acks(cnx);
-
- /* Reschedule the timer */
- cnx->ack_timer.expires = jiffies + cnx->ack_timeout;
- add_timer(&cnx->ack_timer);
- spin_unlock_irqrestore(&cnx->lock, flags);
-}
-
-static int veth_remove(struct vio_dev *vdev)
-{
- struct veth_lpar_connection *cnx;
- struct net_device *dev;
- struct veth_port *port;
- int i;
-
- dev = veth_dev[vdev->unit_address];
-
- if (! dev)
- return 0;
-
- port = netdev_priv(dev);
-
- for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
- cnx = veth_cnx[i];
-
- if (cnx && (port->lpar_map & (1 << i))) {
- /* Drop our reference to connections on our VLAN */
- kobject_put(&cnx->kobject);
- }
- }
-
- veth_dev[vdev->unit_address] = NULL;
- kobject_del(&port->kobject);
- kobject_put(&port->kobject);
- unregister_netdev(dev);
- free_netdev(dev);
-
- return 0;
-}
-
-static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
-{
- int i = vdev->unit_address;
- struct net_device *dev;
- struct veth_port *port;
-
- dev = veth_probe_one(i, vdev);
- if (dev == NULL) {
- veth_remove(vdev);
- return 1;
- }
- veth_dev[i] = dev;
-
- port = netdev_priv(dev);
-
- /* Start the state machine on each connection on this vlan. If we're
- * the first dev to do so this will commence link negotiation */
- for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
- struct veth_lpar_connection *cnx;
-
- if (! (port->lpar_map & (1 << i)))
- continue;
-
- cnx = veth_cnx[i];
- if (!cnx)
- continue;
-
- kobject_get(&cnx->kobject);
- veth_kick_statemachine(cnx);
- }
-
- return 0;
-}
-
-/**
- * veth_device_table: Used by vio.c to match devices that we
- * support.
- */
-static struct vio_device_id veth_device_table[] __devinitdata = {
- { "network", "IBM,iSeries-l-lan" },
- { "", "" }
-};
-MODULE_DEVICE_TABLE(vio, veth_device_table);
-
-static struct vio_driver veth_driver = {
- .id_table = veth_device_table,
- .probe = veth_probe,
- .remove = veth_remove,
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- }
-};
-
-/*
- * Module initialization/cleanup
- */
-
-static void __exit veth_module_cleanup(void)
-{
- int i;
- struct veth_lpar_connection *cnx;
-
- /* Disconnect our "irq" to stop events coming from the Hypervisor. */
- HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan);
-
- for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
- cnx = veth_cnx[i];
-
- if (!cnx)
- continue;
-
- /* Cancel work queued from Hypervisor callbacks */
- cancel_delayed_work_sync(&cnx->statemachine_wq);
- /* Remove the connection from sysfs */
- kobject_del(&cnx->kobject);
- /* Drop the driver's reference to the connection */
- kobject_put(&cnx->kobject);
- }
-
- /* Unregister the driver, which will close all the netdevs and stop
- * the connections when they're no longer referenced. */
- vio_unregister_driver(&veth_driver);
-}
-module_exit(veth_module_cleanup);
-
-static int __init veth_module_init(void)
-{
- int i;
- int rc;
-
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- return -ENODEV;
-
- this_lp = HvLpConfig_getLpIndex_outline();
-
- for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
- rc = veth_init_connection(i);
- if (rc != 0)
- goto error;
- }
-
- HvLpEvent_registerHandler(HvLpEvent_Type_VirtualLan,
- &veth_handle_event);
-
- rc = vio_register_driver(&veth_driver);
- if (rc != 0)
- goto error;
-
- for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
- struct kobject *kobj;
-
- if (!veth_cnx[i])
- continue;
-
- kobj = &veth_cnx[i]->kobject;
- /* If the add fails, complain but otherwise continue */
- if (0 != driver_add_kobj(&veth_driver.driver, kobj,
- "cnx%.2d", veth_cnx[i]->remote_lp))
- veth_error("cnx %d: Failed adding to sysfs.\n", i);
- }
-
- return 0;
-
-error:
- for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
- veth_destroy_connection(veth_cnx[i]);
- }
-
- return rc;
-}
-module_init(veth_module_init);
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 075451d0207d..1b563bb959c2 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -744,9 +744,6 @@ static int ipg_get_rxbuff(struct net_device *dev, int entry)
return -ENOMEM;
}
- /* Associate the receive buffer with the IPG NIC. */
- skb->dev = dev;
-
/* Save the address of the sk_buff structure. */
sp->rx_buff[entry] = skb;
@@ -2233,7 +2230,6 @@ static int __devinit ipg_probe(struct pci_dev *pdev,
*/
dev = alloc_etherdev(sizeof(struct ipg_nic_private));
if (!dev) {
- pr_err("%s: alloc_etherdev failed\n", pci_name(pdev));
rc = -ENOMEM;
goto err_disable_0;
}
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 9436397e5725..e498effb85d9 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -412,6 +412,10 @@ enum cb_status {
cb_ok = 0x2000,
};
+/**
+ * cb_command - Command Block flags
+ * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory
+ */
enum cb_command {
cb_nop = 0x0000,
cb_iaaddr = 0x0001,
@@ -421,6 +425,7 @@ enum cb_command {
cb_ucode = 0x0005,
cb_dump = 0x0006,
cb_tx_sf = 0x0008,
+ cb_tx_nc = 0x0010,
cb_cid = 0x1f00,
cb_i = 0x2000,
cb_s = 0x4000,
@@ -457,7 +462,7 @@ struct config {
/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
- rx_discard_overruns:1), rx_save_bad_frames:1);
+ rx_save_overruns : 1), rx_save_bad_frames : 1);
/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
tx_dynamic_tbd:1);
@@ -617,6 +622,7 @@ struct nic {
u32 rx_fc_pause;
u32 rx_fc_unsupported;
u32 rx_tco_frames;
+ u32 rx_short_frame_errors;
u32 rx_over_length_errors;
u16 eeprom_wc;
@@ -1075,7 +1081,7 @@ static void e100_get_defaults(struct nic *nic)
/* Template for a freshly allocated RFD */
nic->blank_rfd.command = 0;
nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
- nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
+ nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
/* MII setup */
nic->mii.phy_id_mask = 0x1F;
@@ -1089,6 +1095,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct config *config = &cb->u.config;
u8 *c = (u8 *)config;
+ struct net_device *netdev = nic->netdev;
cb->command = cpu_to_le16(cb_config);
@@ -1132,6 +1139,9 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
config->promiscuous_mode = 0x1; /* 1=on, 0=off */
}
+ if (unlikely(netdev->features & NETIF_F_RXFCS))
+ config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */
+
if (nic->flags & multicast_all)
config->multicast_all = 0x1; /* 1=accept, 0=no */
@@ -1156,6 +1166,12 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
}
}
+ if (netdev->features & NETIF_F_RXALL) {
+ config->rx_save_overruns = 0x1; /* 1=save, 0=discard */
+ config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
+ config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
+ }
+
netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
"[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
@@ -1607,7 +1623,9 @@ static void e100_update_stats(struct nic *nic)
ns->collisions += nic->tx_collisions;
ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
le32_to_cpu(s->tx_lost_crs);
- ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
+ nic->rx_short_frame_errors +=
+ le32_to_cpu(s->rx_short_frame_errors);
+ ns->rx_length_errors = nic->rx_short_frame_errors +
nic->rx_over_length_errors;
ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
@@ -1720,6 +1738,16 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
cb->command = nic->tx_command;
+
+ /*
+ * Use the last 4 bytes of the SKB payload as the CRC; used for
+ * testing, i.e. sending frames with a bad CRC.
+ */
+ if (unlikely(skb->no_fcs))
+ cb->command |= __constant_cpu_to_le16(cb_tx_nc);
+ else
+ cb->command &= ~__constant_cpu_to_le16(cb_tx_nc);
+
/* interrupt every 16 packets regardless of delay */
if ((nic->cbs_avail & ~15) == nic->cbs_avail)
cb->command |= cpu_to_le16(cb_i);
@@ -1881,7 +1909,7 @@ static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
}
}
-#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
+#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
@@ -1919,6 +1947,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
struct sk_buff *skb = rx->skb;
struct rfd *rfd = (struct rfd *)skb->data;
u16 rfd_status, actual_size;
+ u16 fcs_pad = 0;
if (unlikely(work_done && *work_done >= work_to_do))
return -EAGAIN;
@@ -1951,6 +1980,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
}
/* Get actual data size */
+ if (unlikely(dev->features & NETIF_F_RXFCS))
+ fcs_pad = 4;
actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
actual_size = RFD_BUF_LEN - sizeof(struct rfd);
@@ -1977,16 +2008,27 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
skb_put(skb, actual_size);
skb->protocol = eth_type_trans(skb, nic->netdev);
+ /* If we are receiving all frames, then don't bother
+ * checking for errors.
+ */
+ if (unlikely(dev->features & NETIF_F_RXALL)) {
+ if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
+ /* Received oversized frame, but keep it. */
+ nic->rx_over_length_errors++;
+ goto process_skb;
+ }
+
if (unlikely(!(rfd_status & cb_ok))) {
/* Don't indicate if hardware indicates errors */
dev_kfree_skb_any(skb);
- } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
+ } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
/* Don't indicate oversized frames */
nic->rx_over_length_errors++;
dev_kfree_skb_any(skb);
} else {
+process_skb:
dev->stats.rx_packets++;
- dev->stats.rx_bytes += actual_size;
+ dev->stats.rx_bytes += (actual_size - fcs_pad);
netif_receive_skb(skb);
if (work_done)
(*work_done)++;
@@ -2058,7 +2100,8 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
pci_dma_sync_single_for_device(nic->pdev,
old_before_last_rx->dma_addr, sizeof(struct rfd),
PCI_DMA_BIDIRECTIONAL);
- old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
+ old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
+ + ETH_FCS_LEN);
pci_dma_sync_single_for_device(nic->pdev,
old_before_last_rx->dma_addr, sizeof(struct rfd),
PCI_DMA_BIDIRECTIONAL);
@@ -2618,6 +2661,7 @@ static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
"tx_flow_control_pause", "rx_flow_control_pause",
"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
+ "rx_short_frame_errors", "rx_over_length_errors",
};
#define E100_NET_STATS_LEN 21
#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats)
@@ -2651,6 +2695,8 @@ static void e100_get_ethtool_stats(struct net_device *netdev,
data[i++] = nic->rx_fc_unsupported;
data[i++] = nic->tx_tco_frames;
data[i++] = nic->rx_tco_frames;
+ data[i++] = nic->rx_short_frame_errors;
+ data[i++] = nic->rx_over_length_errors;
}
static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
@@ -2729,6 +2775,20 @@ static int e100_close(struct net_device *netdev)
return 0;
}
+static int e100_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct nic *nic = netdev_priv(netdev);
+ netdev_features_t changed = features ^ netdev->features;
+
+ if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
+ return 0;
+
+ netdev->features = features;
+ e100_exec_cb(nic, NULL, e100_configure);
+ return 0;
+}
+
static const struct net_device_ops e100_netdev_ops = {
.ndo_open = e100_open,
.ndo_stop = e100_close,
@@ -2742,6 +2802,7 @@ static const struct net_device_ops e100_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = e100_netpoll,
#endif
+ .ndo_set_features = e100_set_features,
};
static int __devinit e100_probe(struct pci_dev *pdev,
@@ -2751,11 +2812,12 @@ static int __devinit e100_probe(struct pci_dev *pdev,
struct nic *nic;
int err;
- if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
- if (((1 << debug) - 1) & NETIF_MSG_PROBE)
- pr_err("Etherdev alloc failed, aborting\n");
+ if (!(netdev = alloc_etherdev(sizeof(struct nic))))
return -ENOMEM;
- }
+
+ netdev->hw_features |= NETIF_F_RXFCS;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
+ netdev->hw_features |= NETIF_F_RXALL;
netdev->netdev_ops = &e100_netdev_ops;
SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 1e1596990b5c..2b6cd02bfba0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -254,6 +254,7 @@ struct e1000_adapter {
atomic_t tx_fifo_stall;
bool pcix_82544;
bool detect_tx_hung;
+ bool dump_buffers;
/* RX */
bool (*clean_rx)(struct e1000_adapter *adapter,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 36ee76bf4cba..c526279e4927 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -5253,6 +5253,78 @@ static s32 e1000_check_downshift(struct e1000_hw *hw)
return E1000_SUCCESS;
}
+static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
+ IGP01E1000_PHY_AGC_PARAM_A,
+ IGP01E1000_PHY_AGC_PARAM_B,
+ IGP01E1000_PHY_AGC_PARAM_C,
+ IGP01E1000_PHY_AGC_PARAM_D
+};
+
+static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw)
+{
+ u16 min_length, max_length;
+ u16 phy_data, i;
+ s32 ret_val;
+
+ ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->dsp_config_state != e1000_dsp_config_enabled)
+ return 0;
+
+ if (min_length >= e1000_igp_cable_length_50) {
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i],
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+
+ ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i],
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ hw->dsp_config_state = e1000_dsp_config_activated;
+ } else {
+ u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+ u32 idle_errs = 0;
+
+ /* clear previous idle error counts */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ for (i = 0; i < ffe_idle_err_timeout; i++) {
+ udelay(1000);
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
+ if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
+ hw->ffe_config_state = e1000_ffe_config_active;
+
+ ret_val = e1000_write_phy_reg(hw,
+ IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_CM_CP);
+ if (ret_val)
+ return ret_val;
+ break;
+ }
+
+ if (idle_errs)
+ ffe_idle_err_timeout =
+ FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+ }
+ }
+
+ return 0;
+}
+
/**
* e1000_config_dsp_after_link_change
* @hw: Struct containing variables accessed by shared code
@@ -5269,13 +5341,6 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
{
s32 ret_val;
u16 phy_data, phy_saved_data, speed, duplex, i;
- static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
- IGP01E1000_PHY_AGC_PARAM_A,
- IGP01E1000_PHY_AGC_PARAM_B,
- IGP01E1000_PHY_AGC_PARAM_C,
- IGP01E1000_PHY_AGC_PARAM_D
- };
- u16 min_length, max_length;
e_dbg("e1000_config_dsp_after_link_change");
@@ -5290,84 +5355,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
}
if (speed == SPEED_1000) {
-
- ret_val =
- e1000_get_cable_length(hw, &min_length,
- &max_length);
+ ret_val = e1000_1000Mb_check_cable_length(hw);
if (ret_val)
return ret_val;
-
- if ((hw->dsp_config_state == e1000_dsp_config_enabled)
- && min_length >= e1000_igp_cable_length_50) {
-
- for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
- ret_val =
- e1000_read_phy_reg(hw,
- dsp_reg_array[i],
- &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data &=
- ~IGP01E1000_PHY_EDAC_MU_INDEX;
-
- ret_val =
- e1000_write_phy_reg(hw,
- dsp_reg_array
- [i], phy_data);
- if (ret_val)
- return ret_val;
- }
- hw->dsp_config_state =
- e1000_dsp_config_activated;
- }
-
- if ((hw->ffe_config_state == e1000_ffe_config_enabled)
- && (min_length < e1000_igp_cable_length_50)) {
-
- u16 ffe_idle_err_timeout =
- FFE_IDLE_ERR_COUNT_TIMEOUT_20;
- u32 idle_errs = 0;
-
- /* clear previous idle error counts */
- ret_val =
- e1000_read_phy_reg(hw, PHY_1000T_STATUS,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- for (i = 0; i < ffe_idle_err_timeout; i++) {
- udelay(1000);
- ret_val =
- e1000_read_phy_reg(hw,
- PHY_1000T_STATUS,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- idle_errs +=
- (phy_data &
- SR_1000T_IDLE_ERROR_CNT);
- if (idle_errs >
- SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT)
- {
- hw->ffe_config_state =
- e1000_ffe_config_active;
-
- ret_val =
- e1000_write_phy_reg(hw,
- IGP01E1000_PHY_DSP_FFE,
- IGP01E1000_PHY_DSP_FFE_CM_CP);
- if (ret_val)
- return ret_val;
- break;
- }
-
- if (idle_errs)
- ffe_idle_err_timeout =
- FFE_IDLE_ERR_COUNT_TIMEOUT_100;
- }
- }
}
} else {
if (hw->dsp_config_state == e1000_dsp_config_activated) {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h
index f6c4d7e2560c..11578c8978db 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h
@@ -895,6 +895,11 @@ struct e1000_ffvt_entry {
#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
+#define E1000_RDFH 0x02410 /* RX Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* RX Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* RX Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* RX Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* RX Data FIFO Packet Count - RW */
#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */
#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */
#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */
@@ -1074,6 +1079,11 @@ struct e1000_ffvt_entry {
#define E1000_82542_IMC E1000_IMC
#define E1000_82542_RCTL E1000_RCTL
#define E1000_82542_RDTR 0x00108
+#define E1000_82542_RDFH E1000_RDFH
+#define E1000_82542_RDFT E1000_RDFT
+#define E1000_82542_RDFHS E1000_RDFHS
+#define E1000_82542_RDFTS E1000_RDFTS
+#define E1000_82542_RDFPC E1000_RDFPC
#define E1000_82542_RDBAL 0x00110
#define E1000_82542_RDBAH 0x00114
#define E1000_82542_RDLEN 0x00118
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index d94d64b5d695..6419a88a69e6 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -730,10 +730,8 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
eeprom.offset = 0;
data = kmalloc(eeprom.len, GFP_KERNEL);
- if (!data) {
- pr_err("Unable to allocate memory to dump EEPROM data\n");
+ if (!data)
return;
- }
ops->get_eeprom(netdev, &eeprom, data);
@@ -1069,8 +1067,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
(hw->mac_type != e1000_82547))
netdev->hw_features |= NETIF_F_TSO;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
+
netdev->features |= netdev->hw_features;
netdev->hw_features |= NETIF_F_RXCSUM;
+ netdev->hw_features |= NETIF_F_RXFCS;
if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
@@ -2694,6 +2695,7 @@ set_itr_now:
#define E1000_TX_FLAGS_VLAN 0x00000002
#define E1000_TX_FLAGS_TSO 0x00000004
#define E1000_TX_FLAGS_IPV4 0x00000008
+#define E1000_TX_FLAGS_NO_FCS 0x00000010
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT 16
@@ -2995,6 +2997,9 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
}
+ if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
+ txd_lower &= ~(E1000_TXD_CMD_IFCS);
+
i = tx_ring->next_to_use;
while (count--) {
@@ -3009,6 +3014,10 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+ /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
+ if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
+ tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
+
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -3224,6 +3233,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (likely(skb->protocol == htons(ETH_P_IP)))
tx_flags |= E1000_TX_FLAGS_IPV4;
+ if (unlikely(skb->no_fcs))
+ tx_flags |= E1000_TX_FLAGS_NO_FCS;
+
count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
nr_frags, mss);
@@ -3241,6 +3253,215 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+#define NUM_REGS 38 /* 1 based count */
+static void e1000_regdump(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 regs[NUM_REGS];
+ u32 *regs_buff = regs;
+ int i = 0;
+
+ static const char * const reg_name[] = {
+ "CTRL", "STATUS",
+ "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
+ "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
+ "TIDV", "TXDCTL", "TADV", "TARC0",
+ "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
+ "TXDCTL1", "TARC1",
+ "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
+ "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
+ "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
+ };
+
+ regs_buff[0] = er32(CTRL);
+ regs_buff[1] = er32(STATUS);
+
+ regs_buff[2] = er32(RCTL);
+ regs_buff[3] = er32(RDLEN);
+ regs_buff[4] = er32(RDH);
+ regs_buff[5] = er32(RDT);
+ regs_buff[6] = er32(RDTR);
+
+ regs_buff[7] = er32(TCTL);
+ regs_buff[8] = er32(TDBAL);
+ regs_buff[9] = er32(TDBAH);
+ regs_buff[10] = er32(TDLEN);
+ regs_buff[11] = er32(TDH);
+ regs_buff[12] = er32(TDT);
+ regs_buff[13] = er32(TIDV);
+ regs_buff[14] = er32(TXDCTL);
+ regs_buff[15] = er32(TADV);
+ regs_buff[16] = er32(TARC0);
+
+ regs_buff[17] = er32(TDBAL1);
+ regs_buff[18] = er32(TDBAH1);
+ regs_buff[19] = er32(TDLEN1);
+ regs_buff[20] = er32(TDH1);
+ regs_buff[21] = er32(TDT1);
+ regs_buff[22] = er32(TXDCTL1);
+ regs_buff[23] = er32(TARC1);
+ regs_buff[24] = er32(CTRL_EXT);
+ regs_buff[25] = er32(ERT);
+ regs_buff[26] = er32(RDBAL0);
+ regs_buff[27] = er32(RDBAH0);
+ regs_buff[28] = er32(TDFH);
+ regs_buff[29] = er32(TDFT);
+ regs_buff[30] = er32(TDFHS);
+ regs_buff[31] = er32(TDFTS);
+ regs_buff[32] = er32(TDFPC);
+ regs_buff[33] = er32(RDFH);
+ regs_buff[34] = er32(RDFT);
+ regs_buff[35] = er32(RDFHS);
+ regs_buff[36] = er32(RDFTS);
+ regs_buff[37] = er32(RDFPC);
+
+ pr_info("Register dump\n");
+ for (i = 0; i < NUM_REGS; i++)
+ pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
+}
+
+/*
+ * e1000_dump: Print registers, tx ring and rx ring
+ */
+static void e1000_dump(struct e1000_adapter *adapter)
+{
+ /* this code doesn't handle multiple rings */
+ struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+ struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+ int i;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print Registers */
+ e1000_regdump(adapter);
+
+ /*
+ * transmit dump
+ */
+ pr_info("TX Desc ring0 dump\n");
+
+ /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
+ *
+ * Legacy Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] (Reserved on Write Back) |
+ * +--------------------------------------------------------------+
+ * 8 | Special | CSS | Status | CMD | CSO | Length |
+ * +--------------------------------------------------------------+
+ * 63 48 47 36 35 32 31 24 23 16 15 0
+ *
+ * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
+ * 63 48 47 40 39 32 31 16 15 8 7 0
+ * +----------------------------------------------------------------+
+ * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
+ * +----------------------------------------------------------------+
+ * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ *
+ * Extended Data Descriptor (DTYP=0x1)
+ * +----------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +----------------------------------------------------------------+
+ * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ */
+ pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
+ pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
+
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
+ struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
+ struct my_u { u64 a; u64 b; };
+ struct my_u *u = (struct my_u *)tx_desc;
+ const char *type;
+
+ if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
+ type = "NTC/U";
+ else if (i == tx_ring->next_to_use)
+ type = "NTU";
+ else if (i == tx_ring->next_to_clean)
+ type = "NTC";
+ else
+ type = "";
+
+ pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
+ ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
+ le64_to_cpu(u->a), le64_to_cpu(u->b),
+ (u64)buffer_info->dma, buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp, buffer_info->skb, type);
+ }
+
+rx_ring_summary:
+ /*
+ * receive dump
+ */
+ pr_info("\nRX Desc ring dump\n");
+
+ /* Legacy Receive Descriptor Format
+ *
+ * +-----------------------------------------------------+
+ * | Buffer Address [63:0] |
+ * +-----------------------------------------------------+
+ * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
+ * +-----------------------------------------------------+
+ * 63 48 47 40 39 32 31 16 15 0
+ */
+ pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");
+
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
+ struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
+ struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
+ struct my_u { u64 a; u64 b; };
+ struct my_u *u = (struct my_u *)rx_desc;
+ const char *type;
+
+ if (i == rx_ring->next_to_use)
+ type = "NTU";
+ else if (i == rx_ring->next_to_clean)
+ type = "NTC";
+ else
+ type = "";
+
+ pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
+ i, le64_to_cpu(u->a), le64_to_cpu(u->b),
+ (u64)buffer_info->dma, buffer_info->skb, type);
+ } /* for */
+
+ /* dump the descriptor caches */
+ /* rx */
+ pr_info("Rx descriptor cache in 64bit format\n");
+ for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
+ pr_info("R%04X: %08X|%08X %08X|%08X\n",
+ i,
+ readl(adapter->hw.hw_addr + i+4),
+ readl(adapter->hw.hw_addr + i),
+ readl(adapter->hw.hw_addr + i+12),
+ readl(adapter->hw.hw_addr + i+8));
+ }
+ /* tx */
+ pr_info("Tx descriptor cache in 64bit format\n");
+ for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
+ pr_info("T%04X: %08X|%08X %08X|%08X\n",
+ i,
+ readl(adapter->hw.hw_addr + i+4),
+ readl(adapter->hw.hw_addr + i),
+ readl(adapter->hw.hw_addr + i+12),
+ readl(adapter->hw.hw_addr + i+8));
+ }
+exit:
+ return;
+}
+
/**
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
@@ -3262,6 +3483,7 @@ static void e1000_reset_task(struct work_struct *work)
if (test_bit(__E1000_DOWN, &adapter->flags))
return;
+ e_err(drv, "Reset adapter\n");
e1000_reinit_safe(adapter);
}
@@ -3679,6 +3901,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
eop,
jiffies,
eop_desc->upper.fields.status);
+ e1000_dump(adapter);
netif_stop_queue(netdev);
}
}
@@ -3902,10 +4125,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
((u32)(rx_desc->errors) << 24),
le16_to_cpu(rx_desc->csum), skb);
- pskb_trim(skb, skb->len - 4);
-
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
+ total_rx_bytes += (skb->len - 4); /* don't count FCS */
+ if (likely(!(netdev->features & NETIF_F_RXFCS)))
+ pskb_trim(skb, skb->len - 4);
total_rx_packets++;
/* eth type trans needs skb->data to point to something */
@@ -4059,14 +4281,15 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
}
}
- /* adjust length to remove Ethernet CRC, this must be
- * done after the TBI_ACCEPT workaround above */
- length -= 4;
-
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += length;
+ total_rx_bytes += (length - 4); /* don't count FCS */
total_rx_packets++;
+ if (likely(!(netdev->features & NETIF_F_RXFCS)))
+ /* adjust length to remove Ethernet CRC, this must be
+ * done after the TBI_ACCEPT workaround above
+ */
+ length -= 4;
+
e1000_check_copybreak(netdev, buffer_info, length, &skb);
skb_put(skb, length);
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index e1159e54334a..bac9dda31b6c 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -201,19 +201,23 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
* e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
* @hw: pointer to the HW structure
**/
-static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
+static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
{
- struct e1000_hw *hw = &adapter->hw;
struct e1000_mac_info *mac = &hw->mac;
- struct e1000_mac_operations *func = &mac->ops;
- /* Set media type */
- switch (adapter->pdev->device) {
+ /* Set media type and media-dependent function pointers */
+ switch (hw->adapter->pdev->device) {
case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
hw->phy.media_type = e1000_media_type_internal_serdes;
+ mac->ops.check_for_link = e1000e_check_for_serdes_link;
+ mac->ops.setup_physical_interface =
+ e1000e_setup_fiber_serdes_link;
break;
default:
hw->phy.media_type = e1000_media_type_copper;
+ mac->ops.check_for_link = e1000e_check_for_copper_link;
+ mac->ops.setup_physical_interface =
+ e1000_setup_copper_link_80003es2lan;
break;
}
@@ -230,25 +234,6 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
/* Adaptive IFS not supported */
mac->adaptive_ifs = false;
- /* check for link */
- switch (hw->phy.media_type) {
- case e1000_media_type_copper:
- func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
- func->check_for_link = e1000e_check_for_copper_link;
- break;
- case e1000_media_type_fiber:
- func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
- func->check_for_link = e1000e_check_for_fiber_link;
- break;
- case e1000_media_type_internal_serdes:
- func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
- func->check_for_link = e1000e_check_for_serdes_link;
- break;
- default:
- return -E1000_ERR_CONFIG;
- break;
- }
-
/* set lan id for port to determine which phy lock to use */
hw->mac.ops.set_lan_id(hw);
@@ -260,7 +245,7 @@ static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
s32 rc;
- rc = e1000_init_mac_params_80003es2lan(adapter);
+ rc = e1000_init_mac_params_80003es2lan(hw);
if (rc)
return rc;
@@ -304,7 +289,7 @@ static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
}
/**
- * e1000_acquire_mac_csr_80003es2lan - Acquire rights to access Kumeran register
+ * e1000_acquire_mac_csr_80003es2lan - Acquire right to access Kumeran register
* @hw: pointer to the HW structure
*
* Acquire the semaphore to access the Kumeran interface.
@@ -320,7 +305,7 @@ static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw)
}
/**
- * e1000_release_mac_csr_80003es2lan - Release rights to access Kumeran Register
+ * e1000_release_mac_csr_80003es2lan - Release right to access Kumeran Register
* @hw: pointer to the HW structure
*
* Release the semaphore used to access the Kumeran interface
@@ -473,7 +458,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
return ret_val;
}
- if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) {
+ if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
/*
* The "ready" bit in the MDIC register may be incorrectly set
* before the device has completed the "Page Select" MDI
@@ -485,9 +470,8 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
- ret_val = -E1000_ERR_PHY;
e1000_release_phy_80003es2lan(hw);
- return ret_val;
+ return -E1000_ERR_PHY;
}
udelay(200);
@@ -545,7 +529,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
return ret_val;
}
- if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) {
+ if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
/*
* The "ready" bit in the MDIC register may be incorrectly set
* before the device has completed the "Page Select" MDI
@@ -667,8 +651,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
udelay(1);
if (hw->phy.autoneg_wait_to_complete) {
- e_dbg("Waiting for forced speed/duplex link "
- "on GG82563 phy.\n");
+ e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n");
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
100000, &link);
@@ -731,22 +714,19 @@ static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
index = phy_data & GG82563_DSPD_CABLE_LENGTH;
- if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) {
- ret_val = -E1000_ERR_PHY;
- goto out;
- }
+ if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5)
+ return -E1000_ERR_PHY;
phy->min_cable_length = e1000_gg82563_cable_length_table[index];
phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5];
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
-out:
- return ret_val;
+ return 0;
}
/**
@@ -820,9 +800,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
er32(ICR);
- ret_val = e1000_check_alt_mac_addr_generic(hw);
-
- return ret_val;
+ return e1000_check_alt_mac_addr_generic(hw);
}
/**
@@ -842,7 +820,7 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
e1000_initialize_hw_bits_80003es2lan(hw);
/* Initialize identification LED */
- ret_val = e1000e_id_led_init(hw);
+ ret_val = mac->ops.id_led_init(hw);
if (ret_val)
e_dbg("Error initializing identification LED\n");
/* This is not fatal and we should not stop init due to this */
@@ -860,7 +838,7 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
/* Setup link and flow control */
- ret_val = e1000e_setup_link(hw);
+ ret_val = mac->ops.setup_link(hw);
/* Disable IBIST slave mode (far-end loopback) */
e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
@@ -1078,7 +1056,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
* firmware will have already initialized them. We only initialize
* them if the HW is not in IAMT mode.
*/
- if (!e1000e_check_mng_mode(hw)) {
+ if (!hw->mac.ops.check_mng_mode(hw)) {
/* Enable Electrical Idle on the PHY */
data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data);
@@ -1163,9 +1141,7 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- ret_val = e1000e_setup_copper_link(hw);
-
- return 0;
+ return e1000e_setup_copper_link(hw);
}
/**
@@ -1241,9 +1217,7 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
else
reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
- ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
-
- return 0;
+ return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
}
/**
@@ -1285,9 +1259,8 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
- ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
- return ret_val;
+ return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
}
/**
@@ -1372,12 +1345,9 @@ static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
*/
ret_val = e1000_check_alt_mac_addr_generic(hw);
if (ret_val)
- goto out;
-
- ret_val = e1000_read_mac_addr_generic(hw);
+ return ret_val;
-out:
- return ret_val;
+ return e1000_read_mac_addr_generic(hw);
}
/**
@@ -1443,7 +1413,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
static const struct e1000_mac_operations es2_mac_ops = {
.read_mac_addr = e1000_read_mac_addr_80003es2lan,
- .id_led_init = e1000e_id_led_init,
+ .id_led_init = e1000e_id_led_init_generic,
.blink_led = e1000e_blink_led_generic,
.check_mng_mode = e1000e_check_mng_mode_generic,
/* check_for_link dependent on media type */
@@ -1459,9 +1429,10 @@ static const struct e1000_mac_operations es2_mac_ops = {
.clear_vfta = e1000_clear_vfta_generic,
.reset_hw = e1000_reset_hw_80003es2lan,
.init_hw = e1000_init_hw_80003es2lan,
- .setup_link = e1000e_setup_link,
+ .setup_link = e1000e_setup_link_generic,
/* setup_physical_interface dependent on media type */
.setup_led = e1000e_setup_led_generic,
+ .config_collision_dist = e1000e_config_collision_dist_generic,
};
static const struct e1000_phy_operations es2_phy_ops = {
@@ -1486,6 +1457,7 @@ static const struct e1000_nvm_operations es2_nvm_ops = {
.acquire = e1000_acquire_nvm_80003es2lan,
.read = e1000e_read_nvm_eerd,
.release = e1000_release_nvm_80003es2lan,
+ .reload = e1000e_reload_nvm_generic,
.update = e1000e_update_nvm_checksum_generic,
.valid_led_default = e1000e_valid_led_default,
.validate = e1000e_validate_nvm_checksum_generic,
@@ -1502,8 +1474,7 @@ const struct e1000_info e1000_es2_info = {
| FLAG_RX_NEEDS_RESTART /* errata */
| FLAG_TARC_SET_BIT_ZERO /* errata */
| FLAG_APME_CHECK_PORT_B
- | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
- | FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
+ | FLAG_DISABLE_FC_PAUSE_TIME, /* errata */
.flags2 = FLAG2_DMA_BURST,
.pba = 38,
.max_hw_frame_size = DEFAULT_JUMBO,
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index a3e65fd26e09..b3fdc6977f2e 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -235,30 +235,42 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
* e1000_init_mac_params_82571 - Init MAC func ptrs.
* @hw: pointer to the HW structure
**/
-static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
+static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
{
- struct e1000_hw *hw = &adapter->hw;
struct e1000_mac_info *mac = &hw->mac;
- struct e1000_mac_operations *func = &mac->ops;
u32 swsm = 0;
u32 swsm2 = 0;
bool force_clear_smbi = false;
- /* Set media type */
- switch (adapter->pdev->device) {
+ /* Set media type and media-dependent function pointers */
+ switch (hw->adapter->pdev->device) {
case E1000_DEV_ID_82571EB_FIBER:
case E1000_DEV_ID_82572EI_FIBER:
case E1000_DEV_ID_82571EB_QUAD_FIBER:
hw->phy.media_type = e1000_media_type_fiber;
+ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_82571;
+ mac->ops.check_for_link = e1000e_check_for_fiber_link;
+ mac->ops.get_link_up_info =
+ e1000e_get_speed_and_duplex_fiber_serdes;
break;
case E1000_DEV_ID_82571EB_SERDES:
- case E1000_DEV_ID_82572EI_SERDES:
case E1000_DEV_ID_82571EB_SERDES_DUAL:
case E1000_DEV_ID_82571EB_SERDES_QUAD:
+ case E1000_DEV_ID_82572EI_SERDES:
hw->phy.media_type = e1000_media_type_internal_serdes;
+ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_82571;
+ mac->ops.check_for_link = e1000_check_for_serdes_link_82571;
+ mac->ops.get_link_up_info =
+ e1000e_get_speed_and_duplex_fiber_serdes;
break;
default:
hw->phy.media_type = e1000_media_type_copper;
+ mac->ops.setup_physical_interface =
+ e1000_setup_copper_link_82571;
+ mac->ops.check_for_link = e1000e_check_for_copper_link;
+ mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_copper;
break;
}
@@ -269,38 +281,13 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
/* Adaptive IFS supported */
mac->adaptive_ifs = true;
- /* check for link */
- switch (hw->phy.media_type) {
- case e1000_media_type_copper:
- func->setup_physical_interface = e1000_setup_copper_link_82571;
- func->check_for_link = e1000e_check_for_copper_link;
- func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
- break;
- case e1000_media_type_fiber:
- func->setup_physical_interface =
- e1000_setup_fiber_serdes_link_82571;
- func->check_for_link = e1000e_check_for_fiber_link;
- func->get_link_up_info =
- e1000e_get_speed_and_duplex_fiber_serdes;
- break;
- case e1000_media_type_internal_serdes:
- func->setup_physical_interface =
- e1000_setup_fiber_serdes_link_82571;
- func->check_for_link = e1000_check_for_serdes_link_82571;
- func->get_link_up_info =
- e1000e_get_speed_and_duplex_fiber_serdes;
- break;
- default:
- return -E1000_ERR_CONFIG;
- break;
- }
-
+ /* MAC-specific function pointers */
switch (hw->mac.type) {
case e1000_82573:
- func->set_lan_id = e1000_set_lan_id_single_port;
- func->check_mng_mode = e1000e_check_mng_mode_generic;
- func->led_on = e1000e_led_on_generic;
- func->blink_led = e1000e_blink_led_generic;
+ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
+ mac->ops.check_mng_mode = e1000e_check_mng_mode_generic;
+ mac->ops.led_on = e1000e_led_on_generic;
+ mac->ops.blink_led = e1000e_blink_led_generic;
/* FWSM register */
mac->has_fwsm = true;
@@ -314,14 +301,14 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
break;
case e1000_82574:
case e1000_82583:
- func->set_lan_id = e1000_set_lan_id_single_port;
- func->check_mng_mode = e1000_check_mng_mode_82574;
- func->led_on = e1000_led_on_82574;
+ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
+ mac->ops.check_mng_mode = e1000_check_mng_mode_82574;
+ mac->ops.led_on = e1000_led_on_82574;
break;
default:
- func->check_mng_mode = e1000e_check_mng_mode_generic;
- func->led_on = e1000e_led_on_generic;
- func->blink_led = e1000e_blink_led_generic;
+ mac->ops.check_mng_mode = e1000e_check_mng_mode_generic;
+ mac->ops.led_on = e1000e_led_on_generic;
+ mac->ops.blink_led = e1000e_blink_led_generic;
/* FWSM register */
mac->has_fwsm = true;
@@ -342,11 +329,11 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
if (!(swsm2 & E1000_SWSM2_LOCK)) {
/* Only do this for the first interface on this card */
- ew32(SWSM2,
- swsm2 | E1000_SWSM2_LOCK);
+ ew32(SWSM2, swsm2 | E1000_SWSM2_LOCK);
force_clear_smbi = true;
- } else
+ } else {
force_clear_smbi = false;
+ }
break;
default:
force_clear_smbi = true;
@@ -383,7 +370,7 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
s32 rc;
- rc = e1000_init_mac_params_82571(adapter);
+ rc = e1000_init_mac_params_82571(hw);
if (rc)
return rc;
@@ -577,7 +564,6 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
{
u32 extcnf_ctrl;
- s32 ret_val = 0;
s32 i = 0;
extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -599,12 +585,10 @@ static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
/* Release semaphores */
e1000_put_hw_semaphore_82573(hw);
e_dbg("Driver can't access the PHY\n");
- ret_val = -E1000_ERR_PHY;
- goto out;
+ return -E1000_ERR_PHY;
}
-out:
- return ret_val;
+ return 0;
}
/**
@@ -809,7 +793,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
* otherwise, commit the checksum to the flash NVM.
*/
if (hw->nvm.type != e1000_nvm_flash_hw)
- return ret_val;
+ return 0;
/* Check for pending operations. */
for (i = 0; i < E1000_FLASH_UPDATES; i++) {
@@ -1134,7 +1118,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
e1000_initialize_hw_bits_82571(hw);
/* Initialize identification LED */
- ret_val = e1000e_id_led_init(hw);
+ ret_val = mac->ops.id_led_init(hw);
if (ret_val)
e_dbg("Error initializing identification LED\n");
/* This is not fatal and we should not stop init due to this */
@@ -1159,7 +1143,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
/* Setup link and flow control */
- ret_val = e1000_setup_link_82571(hw);
+ ret_val = mac->ops.setup_link(hw);
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL(0));
@@ -1227,6 +1211,10 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
case e1000_82572:
reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
break;
+ case e1000_82574:
+ case e1000_82583:
+ reg |= (1 << 26);
+ break;
default:
break;
}
@@ -1281,18 +1269,16 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
reg |= E1000_PBA_ECC_CORR_EN;
ew32(PBA_ECC, reg);
}
+
/*
* Workaround for hardware errata.
* Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
*/
-
- if ((hw->mac.type == e1000_82571) ||
- (hw->mac.type == e1000_82572)) {
- reg = er32(CTRL_EXT);
- reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
- ew32(CTRL_EXT, reg);
- }
-
+ if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) {
+ reg = er32(CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+ ew32(CTRL_EXT, reg);
+ }
/* PCI-Ex Control Registers */
switch (hw->mac.type) {
@@ -1418,7 +1404,6 @@ bool e1000_check_phy_82574(struct e1000_hw *hw)
{
u16 status_1kbt = 0;
u16 receive_errors = 0;
- bool phy_hung = false;
s32 ret_val = 0;
/*
@@ -1426,19 +1411,18 @@ bool e1000_check_phy_82574(struct e1000_hw *hw)
* read the Base1000T status register If both are max then PHY is hung.
*/
ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
-
if (ret_val)
- goto out;
+ return false;
if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
if (ret_val)
- goto out;
+ return false;
if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
E1000_IDLE_ERROR_COUNT_MASK)
- phy_hung = true;
+ return true;
}
-out:
- return phy_hung;
+
+ return false;
}
/**
@@ -1469,7 +1453,7 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw)
break;
}
- return e1000e_setup_link(hw);
+ return e1000e_setup_link_generic(hw);
}
/**
@@ -1506,9 +1490,7 @@ static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- ret_val = e1000e_setup_copper_link(hw);
-
- return ret_val;
+ return e1000e_setup_copper_link(hw);
}
/**
@@ -1842,9 +1824,9 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
**/
static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
{
- s32 ret_val = 0;
-
if (hw->mac.type == e1000_82571) {
+ s32 ret_val = 0;
+
/*
* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
@@ -1852,13 +1834,10 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
*/
ret_val = e1000_check_alt_mac_addr_generic(hw);
if (ret_val)
- goto out;
+ return ret_val;
}
- ret_val = e1000_read_mac_addr_generic(hw);
-
-out:
- return ret_val;
+ return e1000_read_mac_addr_generic(hw);
}
/**
@@ -1873,7 +1852,7 @@ static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
struct e1000_phy_info *phy = &hw->phy;
struct e1000_mac_info *mac = &hw->mac;
- if (!(phy->ops.check_reset_block))
+ if (!phy->ops.check_reset_block)
return;
/* If the management interface is not enabled, then power down */
@@ -1930,7 +1909,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
static const struct e1000_mac_operations e82571_mac_ops = {
/* .check_mng_mode: mac type dependent */
/* .check_for_link: media type dependent */
- .id_led_init = e1000e_id_led_init,
+ .id_led_init = e1000e_id_led_init_generic,
.cleanup_led = e1000e_cleanup_led_generic,
.clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
.get_bus_info = e1000e_get_bus_info_pcie,
@@ -1946,6 +1925,7 @@ static const struct e1000_mac_operations e82571_mac_ops = {
.setup_link = e1000_setup_link_82571,
/* .setup_physical_interface: media type dependent */
.setup_led = e1000e_setup_led_generic,
+ .config_collision_dist = e1000e_config_collision_dist_generic,
.read_mac_addr = e1000_read_mac_addr_82571,
};
@@ -2007,6 +1987,7 @@ static const struct e1000_nvm_operations e82571_nvm_ops = {
.acquire = e1000_acquire_nvm_82571,
.read = e1000e_read_nvm_eerd,
.release = e1000_release_nvm_82571,
+ .reload = e1000e_reload_nvm_generic,
.update = e1000_update_nvm_checksum_82571,
.valid_led_default = e1000_valid_led_default_82571,
.validate = e1000_validate_nvm_checksum_82571,
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index 948c05db5d68..591b71324505 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2011 Intel Corporation.
+# Copyright(c) 1999 - 2012 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -33,5 +33,6 @@
obj-$(CONFIG_E1000E) += e1000e.o
e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
- lib.o phy.o param.o ethtool.o netdev.o
+ mac.o manage.o nvm.o phy.o \
+ param.o ethtool.o netdev.o
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index c516a7440bec..3a5025917163 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -126,6 +126,13 @@
E1000_RXDEXT_STATERR_CXE | \
E1000_RXDEXT_STATERR_RXE)
+#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+
#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
/* Management Control */
@@ -170,6 +177,7 @@
#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
+#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */
#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
@@ -326,6 +334,7 @@
/* Receive Checksum Control */
#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
/* Header split receive */
#define E1000_RFCTL_NFSW_DIS 0x00000040
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index f478a22ed577..86cdd4793992 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -234,6 +234,7 @@ struct e1000_buffer {
};
struct e1000_ring {
+ struct e1000_adapter *adapter; /* back pointer to adapter */
void *desc; /* pointer to ring memory */
dma_addr_t dma; /* phys address of ring */
unsigned int size; /* length of ring in bytes */
@@ -242,8 +243,8 @@ struct e1000_ring {
u16 next_to_use;
u16 next_to_clean;
- u16 head;
- u16 tail;
+ void __iomem *head;
+ void __iomem *tail;
/* array of buffer information structs */
struct e1000_buffer *buffer_info;
@@ -251,7 +252,7 @@ struct e1000_ring {
char name[IFNAMSIZ + 5];
u32 ims_val;
u32 itr_val;
- u16 itr_register;
+ void __iomem *itr_register;
int set_itr;
struct sk_buff *rx_skb_top;
@@ -334,11 +335,10 @@ struct e1000_adapter {
/*
* Rx
*/
- bool (*clean_rx) (struct e1000_adapter *adapter,
- int *work_done, int work_to_do)
- ____cacheline_aligned_in_smp;
- void (*alloc_rx_buf) (struct e1000_adapter *adapter,
- int cleaned_count, gfp_t gfp);
+ bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
+ int work_to_do) ____cacheline_aligned_in_smp;
+ void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count,
+ gfp_t gfp);
struct e1000_ring *rx_ring;
u32 rx_int_delay;
@@ -398,6 +398,9 @@ struct e1000_adapter {
bool idle_check;
int phy_hang_count;
+
+ u16 tx_ring_count;
+ u16 rx_ring_count;
};
struct e1000_info {
@@ -417,7 +420,7 @@ struct e1000_info {
#define FLAG_HAS_FLASH (1 << 1)
#define FLAG_HAS_HW_VLAN_FILTER (1 << 2)
#define FLAG_HAS_WOL (1 << 3)
-#define FLAG_HAS_ERT (1 << 4)
+/* reserved bit4 */
#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
@@ -427,7 +430,7 @@ struct e1000_info {
#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
#define FLAG_IS_QUAD_PORT_A (1 << 12)
#define FLAG_IS_QUAD_PORT (1 << 13)
-#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN (1 << 14)
+/* reserved bit14 */
#define FLAG_APME_IN_WUC (1 << 15)
#define FLAG_APME_IN_CTRL3 (1 << 16)
#define FLAG_APME_CHECK_PORT_B (1 << 17)
@@ -458,6 +461,7 @@ struct e1000_info {
#define FLAG2_CHECK_PHY_HANG (1 << 9)
#define FLAG2_NO_DISABLE_RX (1 << 10)
#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
+#define FLAG2_DFLT_CRC_STRIPPING (1 << 12)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -492,10 +496,10 @@ extern void e1000e_down(struct e1000_adapter *adapter);
extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
extern void e1000e_reset(struct e1000_adapter *adapter);
extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
-extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
-extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
-extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
-extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
+extern int e1000e_setup_rx_resources(struct e1000_ring *ring);
+extern int e1000e_setup_tx_resources(struct e1000_ring *ring);
+extern void e1000e_free_rx_resources(struct e1000_ring *ring);
+extern void e1000e_free_tx_resources(struct e1000_ring *ring);
extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64
*stats);
@@ -555,12 +559,12 @@ extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u
extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
-extern s32 e1000e_id_led_init(struct e1000_hw *hw);
+extern s32 e1000e_id_led_init_generic(struct e1000_hw *hw);
extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
-extern s32 e1000e_setup_link(struct e1000_hw *hw);
+extern s32 e1000e_setup_link_generic(struct e1000_hw *hw);
extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
@@ -571,7 +575,7 @@ extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
-extern void e1000e_config_collision_dist(struct e1000_hw *hw);
+extern void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
extern s32 e1000e_blink_led_generic(struct e1000_hw *hw);
@@ -658,11 +662,6 @@ static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
return hw->phy.ops.reset(hw);
}
-static inline s32 e1000_check_reset_block(struct e1000_hw *hw)
-{
- return hw->phy.ops.check_reset_block(hw);
-}
-
static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
{
return hw->phy.ops.read_reg(hw, offset, data);
@@ -685,7 +684,7 @@ extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
extern void e1000e_release_nvm(struct e1000_hw *hw);
-extern void e1000e_reload_nvm(struct e1000_hw *hw);
+extern void e1000e_reload_nvm_generic(struct e1000_hw *hw);
extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
@@ -721,11 +720,6 @@ static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
return hw->phy.ops.get_info(hw);
}
-static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw)
-{
- return hw->mac.ops.check_mng_mode(hw);
-}
-
extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index fb2c28e799a2..db35dd5d96de 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -34,6 +34,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/vmalloc.h>
#include "e1000.h"
@@ -257,7 +258,7 @@ static int e1000_set_settings(struct net_device *netdev,
* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
*/
- if (e1000_check_reset_block(hw)) {
+ if (hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot change link characteristics when SoL/IDER is "
"active.\n");
return -EINVAL;
@@ -536,7 +537,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]);
ptr++;
}
- if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0))
+ if (((eeprom->offset + eeprom->len) & 1) && (!ret_val))
/* need read/modify/write of last changed EEPROM word */
/* only the first byte of the word is being modified */
ret_val = e1000_read_nvm(hw, last_word, 1,
@@ -552,7 +553,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
memcpy(ptr, bytes, eeprom->len);
for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+ cpu_to_le16s(&eeprom_buff[i]);
ret_val = e1000_write_nvm(hw, first_word,
last_word - first_word + 1, eeprom_buff);
@@ -605,94 +606,112 @@ static void e1000_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_ring *tx_ring = adapter->tx_ring;
- struct e1000_ring *rx_ring = adapter->rx_ring;
ring->rx_max_pending = E1000_MAX_RXD;
ring->tx_max_pending = E1000_MAX_TXD;
- ring->rx_pending = rx_ring->count;
- ring->tx_pending = tx_ring->count;
+ ring->rx_pending = adapter->rx_ring_count;
+ ring->tx_pending = adapter->tx_ring_count;
}
static int e1000_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_ring *tx_ring, *tx_old;
- struct e1000_ring *rx_ring, *rx_old;
- int err;
+ struct e1000_ring *temp_tx = NULL, *temp_rx = NULL;
+ int err = 0, size = sizeof(struct e1000_ring);
+ bool set_tx = false, set_rx = false;
+ u16 new_rx_count, new_tx_count;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
+ new_rx_count = clamp_t(u32, ring->rx_pending, E1000_MIN_RXD,
+ E1000_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
- if (netif_running(adapter->netdev))
- e1000e_down(adapter);
+ new_tx_count = clamp_t(u32, ring->tx_pending, E1000_MIN_TXD,
+ E1000_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
- tx_old = adapter->tx_ring;
- rx_old = adapter->rx_ring;
+ if ((new_tx_count == adapter->tx_ring_count) &&
+ (new_rx_count == adapter->rx_ring_count))
+ /* nothing to do */
+ return 0;
- err = -ENOMEM;
- tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
- if (!tx_ring)
- goto err_alloc_tx;
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
- rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
- if (!rx_ring)
- goto err_alloc_rx;
+ if (!netif_running(adapter->netdev)) {
+ /* Set counts now and allocate resources during open() */
+ adapter->tx_ring->count = new_tx_count;
+ adapter->rx_ring->count = new_rx_count;
+ adapter->tx_ring_count = new_tx_count;
+ adapter->rx_ring_count = new_rx_count;
+ goto clear_reset;
+ }
- adapter->tx_ring = tx_ring;
- adapter->rx_ring = rx_ring;
+ set_tx = (new_tx_count != adapter->tx_ring_count);
+ set_rx = (new_rx_count != adapter->rx_ring_count);
- rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD);
- rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD));
- rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE);
+ /* Allocate temporary storage for ring updates */
+ if (set_tx) {
+ temp_tx = vmalloc(size);
+ if (!temp_tx) {
+ err = -ENOMEM;
+ goto free_temp;
+ }
+ }
+ if (set_rx) {
+ temp_rx = vmalloc(size);
+ if (!temp_rx) {
+ err = -ENOMEM;
+ goto free_temp;
+ }
+ }
- tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD);
- tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD));
- tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);
+ e1000e_down(adapter);
- if (netif_running(adapter->netdev)) {
- /* Try to get new resources before deleting old */
- err = e1000e_setup_rx_resources(adapter);
+ /*
+ * We can't just free everything and then setup again, because the
+ * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring
+ * structs. First, attempt to allocate new resources...
+ */
+ if (set_tx) {
+ memcpy(temp_tx, adapter->tx_ring, size);
+ temp_tx->count = new_tx_count;
+ err = e1000e_setup_tx_resources(temp_tx);
if (err)
- goto err_setup_rx;
- err = e1000e_setup_tx_resources(adapter);
+ goto err_setup;
+ }
+ if (set_rx) {
+ memcpy(temp_rx, adapter->rx_ring, size);
+ temp_rx->count = new_rx_count;
+ err = e1000e_setup_rx_resources(temp_rx);
if (err)
- goto err_setup_tx;
+ goto err_setup_rx;
+ }
- /*
- * restore the old in order to free it,
- * then add in the new
- */
- adapter->rx_ring = rx_old;
- adapter->tx_ring = tx_old;
- e1000e_free_rx_resources(adapter);
- e1000e_free_tx_resources(adapter);
- kfree(tx_old);
- kfree(rx_old);
- adapter->rx_ring = rx_ring;
- adapter->tx_ring = tx_ring;
- err = e1000e_up(adapter);
- if (err)
- goto err_setup;
+ /* ...then free the old resources and copy back any new ring data */
+ if (set_tx) {
+ e1000e_free_tx_resources(adapter->tx_ring);
+ memcpy(adapter->tx_ring, temp_tx, size);
+ adapter->tx_ring_count = new_tx_count;
+ }
+ if (set_rx) {
+ e1000e_free_rx_resources(adapter->rx_ring);
+ memcpy(adapter->rx_ring, temp_rx, size);
+ adapter->rx_ring_count = new_rx_count;
}
- clear_bit(__E1000_RESETTING, &adapter->state);
- return 0;
-err_setup_tx:
- e1000e_free_rx_resources(adapter);
err_setup_rx:
- adapter->rx_ring = rx_old;
- adapter->tx_ring = tx_old;
- kfree(rx_ring);
-err_alloc_rx:
- kfree(tx_ring);
-err_alloc_tx:
- e1000e_up(adapter);
+ if (err && set_tx)
+ e1000e_free_tx_resources(temp_tx);
err_setup:
+ e1000e_up(adapter);
+free_temp:
+ vfree(temp_tx);
+ vfree(temp_rx);
+clear_reset:
clear_bit(__E1000_RESETTING, &adapter->state);
return err;
}
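
The rewritten e1000_set_ringparam() vmalloc()s scratch ring structures, sets up the new descriptor resources in them, and only then frees the old resources and copies the scratch contents back over the live structs, since the MSI-X interrupt handlers keep pointers to those structs and must always see a valid ring. A condensed, userspace-style sketch of that build-aside-then-swap-contents pattern (illustrative types, not driver code):

#include <stdlib.h>

struct ring {			/* stands in for struct e1000_ring */
	int count;		/* descriptor count */
	void *desc;		/* descriptor memory */
};

static int setup_ring(struct ring *r)	/* allocate the new resources */
{
	r->desc = calloc(r->count, 16);
	return r->desc ? 0 : -1;
}

static void free_ring(struct ring *r)	/* release the old resources */
{
	free(r->desc);
	r->desc = NULL;
}

/* live->count/desc change, but the address of *live never does, so
 * anything holding a pointer to it (an IRQ handler, for instance) stays
 * valid across the resize. */
static int resize_ring(struct ring *live, int new_count)
{
	struct ring tmp = *live;
	int err;

	tmp.count = new_count;
	err = setup_ring(&tmp);		/* build the new ring first */
	if (err)
		return err;		/* old ring is still fully intact */

	free_ring(live);		/* now drop the old resources */
	*live = tmp;			/* and copy the new state back in */
	return 0;
}

The driver version additionally serializes the whole operation behind the __E1000_RESETTING bit and brings the interface down and back up around the swap.
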
@@ -1069,7 +1088,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->buffer_info = kcalloc(tx_ring->count,
sizeof(struct e1000_buffer),
GFP_KERNEL);
- if (!(tx_ring->buffer_info)) {
+ if (!tx_ring->buffer_info) {
ret_val = 1;
goto err_nomem;
}
@@ -1131,7 +1150,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rx_ring->buffer_info = kcalloc(rx_ring->count,
sizeof(struct e1000_buffer),
GFP_KERNEL);
- if (!(rx_ring->buffer_info)) {
+ if (!rx_ring->buffer_info) {
ret_val = 5;
goto err_nomem;
}
@@ -1579,11 +1598,13 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
{
+ struct e1000_hw *hw = &adapter->hw;
+
/*
* PHY loopback cannot be performed if SoL/IDER
* sessions are active
*/
- if (e1000_check_reset_block(&adapter->hw)) {
+ if (hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
*data = 0;
goto out;
@@ -1837,11 +1858,11 @@ static int e1000_set_phys_id(struct net_device *netdev,
break;
case ETHTOOL_ID_ON:
- adapter->hw.mac.ops.led_on(&adapter->hw);
+ hw->mac.ops.led_on(hw);
break;
case ETHTOOL_ID_OFF:
- adapter->hw.mac.ops.led_off(&adapter->hw);
+ hw->mac.ops.led_off(hw);
break;
}
return 0;
@@ -1955,6 +1976,53 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
}
}
+static int e1000_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+ info->data = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXFH: {
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc = er32(MRQC);
+
+ if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
+ return 0;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ break;
+ }
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
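
The new e1000_get_rxnfc() hook reports which header fields feed the RSS hash, based on the MRQC register. A standalone userspace sketch of the caller side, using the standard ETHTOOL_GRXFH ioctl (roughly what `ethtool -n <dev> rx-flow-hash tcp4` issues); the device name comes from argv[1] and error handling is minimal:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_GRXFH,
				     .flow_type = TCP_V4_FLOW };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 || argc < 2)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GRXFH");
		return 1;
	}

	printf("tcp4 hash fields:%s%s%s%s\n",
	       nfc.data & RXH_IP_SRC   ? " ip-src" : "",
	       nfc.data & RXH_IP_DST   ? " ip-dst" : "",
	       nfc.data & RXH_L4_B_0_1 ? " src-port" : "",
	       nfc.data & RXH_L4_B_2_3 ? " dst-port" : "");
	return 0;
}
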
static const struct ethtool_ops e1000_ethtool_ops = {
.get_settings = e1000_get_settings,
.set_settings = e1000_set_settings,
@@ -1981,6 +2049,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_sset_count = e1000e_get_sset_count,
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
+ .get_rxnfc = e1000_get_rxnfc,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 29670397079b..f82ecf536c8b 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -204,6 +204,7 @@ enum e1e_registers {
E1000_WUC = 0x05800, /* Wakeup Control - RW */
E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */
E1000_WUS = 0x05810, /* Wakeup Status - RO */
+ E1000_MRQC = 0x05818, /* Multiple Receive Control - RW */
E1000_MANC = 0x05820, /* Management Control - RW */
E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */
E1000_HOST_IF = 0x08800, /* Host Interface */
@@ -219,6 +220,10 @@ enum e1e_registers {
E1000_SWSM = 0x05B50, /* SW Semaphore */
E1000_FWSM = 0x05B54, /* FW Semaphore */
E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
+ E1000_RETA_BASE = 0x05C00, /* Redirection Table - RW */
+#define E1000_RETA(_n) (E1000_RETA_BASE + ((_n) * 4))
+ E1000_RSSRK_BASE = 0x05C80, /* RSS Random Key - RW */
+#define E1000_RSSRK(_n) (E1000_RSSRK_BASE + ((_n) * 4))
E1000_FFLT_DBG = 0x05F04, /* Debug Register */
E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */
#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4))
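
E1000_RETA(n) and E1000_RSSRK(n) generate addresses for the RSS redirection table and hash key registers added alongside MRQC. A standalone sketch of how such indexed registers are commonly filled, assuming the usual Intel layout of a 10-dword key and four one-byte queue indices per redirection-table dword; the driver's real RSS setup is elsewhere in this patch and not shown here, and wr32() below merely stands in for a register write:

#include <stdio.h>

#define E1000_RETA_BASE		0x05C00
#define E1000_RETA(_n)		(E1000_RETA_BASE + ((_n) * 4))
#define E1000_RSSRK_BASE	0x05C80
#define E1000_RSSRK(_n)		(E1000_RSSRK_BASE + ((_n) * 4))

/* stand-in for an MMIO register write; just logs what would be written */
static void wr32(unsigned int reg, unsigned int val)
{
	printf("write 0x%05x = 0x%08x\n", reg, val);
}

int main(void)
{
	unsigned int reta = 0;
	int i;

	for (i = 0; i < 10; i++)	/* 40-byte hash key, placeholder words */
		wr32(E1000_RSSRK(i), 0x6d5a6d5a);

	/* four one-byte queue indices per RETA dword, two Rx queues assumed */
	for (i = 0; i < 32 * 4; i++) {
		reta |= (unsigned int)(i % 2) << ((i % 4) * 8);
		if ((i % 4) == 3) {
			wr32(E1000_RETA(i / 4), reta);
			reta = 0;
		}
	}
	return 0;
}
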
@@ -776,6 +781,7 @@ struct e1000_mac_operations {
s32 (*setup_physical_interface)(struct e1000_hw *);
s32 (*setup_led)(struct e1000_hw *);
void (*write_vfta)(struct e1000_hw *, u32, u32);
+ void (*config_collision_dist)(struct e1000_hw *);
s32 (*read_mac_addr)(struct e1000_hw *);
};
@@ -824,6 +830,7 @@ struct e1000_nvm_operations {
s32 (*acquire)(struct e1000_hw *);
s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
void (*release)(struct e1000_hw *);
+ void (*reload)(struct e1000_hw *);
s32 (*update)(struct e1000_hw *);
s32 (*valid_led_default)(struct e1000_hw *, u16 *);
s32 (*validate)(struct e1000_hw *);
@@ -964,8 +971,8 @@ struct e1000_dev_spec_ich8lan {
struct e1000_hw {
struct e1000_adapter *adapter;
- u8 __iomem *hw_addr;
- u8 __iomem *flash_address;
+ void __iomem *hw_addr;
+ void __iomem *flash_address;
struct e1000_mac_info mac;
struct e1000_fc_info fc;
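
hw_addr and flash_address become plain void __iomem * pointers, the type the readl()/writel() MMIO helpers take, so register offsets can be added directly (the kernel allows arithmetic on void pointers). A minimal sketch of accessors over such a base pointer; these are illustrative, not the driver's er32()/ew32() definitions:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative MMIO accessors over a void __iomem * base; not the
 * driver's real register macros. */
static inline u32 sketch_rd32(void __iomem *base, unsigned long reg)
{
	return readl(base + reg);
}

static inline void sketch_wr32(void __iomem *base, unsigned long reg, u32 val)
{
	writel(val, base + reg);
}
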
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index e2a80a283fd3..64c76443a7aa 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -145,6 +145,8 @@
#define I82579_EMI_ADDR 0x10
#define I82579_EMI_DATA 0x11
#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
+#define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */
+#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
/* Strapping Option Register - RO */
#define E1000_STRAP 0x0000C
@@ -278,8 +280,8 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
#define er16flash(reg) __er16flash(hw, (reg))
#define er32flash(reg) __er32flash(hw, (reg))
-#define ew16flash(reg,val) __ew16flash(hw, (reg), (val))
-#define ew32flash(reg,val) __ew32flash(hw, (reg), (val))
+#define ew16flash(reg, val) __ew16flash(hw, (reg), (val))
+#define ew32flash(reg, val) __ew32flash(hw, (reg), (val))
static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
{
@@ -304,7 +306,6 @@ static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- u32 fwsm;
s32 ret_val = 0;
phy->addr = 1;
@@ -323,14 +324,14 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
- /*
- * The MAC-PHY interconnect may still be in SMBus mode
- * after Sx->S0. If the manageability engine (ME) is
- * disabled, then toggle the LANPHYPC Value bit to force
- * the interconnect to PCIe mode.
- */
- fwsm = er32(FWSM);
- if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) {
+ if (!hw->phy.ops.check_reset_block(hw)) {
+ u32 fwsm = er32(FWSM);
+
+ /*
+ * The MAC-PHY interconnect may still be in SMBus mode after
+ * Sx->S0. If resetting the PHY is not blocked, toggle the
+ * LANPHYPC Value bit to force the interconnect to PCIe mode.
+ */
e1000_toggle_lanphypc_value_ich8lan(hw);
msleep(50);
@@ -338,25 +339,26 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
* Gate automatic PHY configuration by hardware on
* non-managed 82579
*/
- if (hw->mac.type == e1000_pch2lan)
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(fwsm & E1000_ICH_FWSM_FW_VALID))
e1000_gate_hw_phy_config_ich8lan(hw, true);
- }
- /*
- * Reset the PHY before any access to it. Doing so, ensures that
- * the PHY is in a known good state before we read/write PHY registers.
- * The generic reset is sufficient here, because we haven't determined
- * the PHY type yet.
- */
- ret_val = e1000e_phy_hw_reset_generic(hw);
- if (ret_val)
- goto out;
+ /*
+ * Reset the PHY before any access to it. Doing so, ensures
+ * that the PHY is in a known good state before we read/write
+ * PHY registers. The generic reset is sufficient here,
+ * because we haven't determined the PHY type yet.
+ */
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ return ret_val;
- /* Ungate automatic PHY configuration on non-managed 82579 */
- if ((hw->mac.type == e1000_pch2lan) &&
- !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
- usleep_range(10000, 20000);
- e1000_gate_hw_phy_config_ich8lan(hw, false);
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
+ usleep_range(10000, 20000);
+ e1000_gate_hw_phy_config_ich8lan(hw, false);
+ }
}
phy->id = e1000_phy_unknown;
@@ -364,7 +366,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
default:
ret_val = e1000e_get_phy_id(hw);
if (ret_val)
- goto out;
+ return ret_val;
if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
break;
/* fall-through */
@@ -375,10 +377,10 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
*/
ret_val = e1000_set_mdio_slow_mode_hv(hw);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = e1000e_get_phy_id(hw);
if (ret_val)
- goto out;
+ return ret_val;
break;
}
phy->type = e1000e_get_phy_type_from_id(phy->id);
@@ -404,7 +406,6 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
break;
}
-out:
return ret_val;
}
@@ -551,9 +552,8 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
* Initialize family-specific MAC parameters and function
* pointers.
**/
-static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
+static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
- struct e1000_hw *hw = &adapter->hw;
struct e1000_mac_info *mac = &hw->mac;
/* Set media type function pointer */
@@ -580,7 +580,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
/* ID LED init */
- mac->ops.id_led_init = e1000e_id_led_init;
+ mac->ops.id_led_init = e1000e_id_led_init_generic;
/* blink LED */
mac->ops.blink_led = e1000e_blink_led_generic;
/* setup LED */
@@ -634,20 +634,18 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
u16 phy_reg;
if (hw->phy.type != e1000_phy_82579)
- goto out;
+ return 0;
ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
if (ret_val)
- goto out;
+ return ret_val;
if (hw->dev_spec.ich8lan.eee_disable)
phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
else
phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
- ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
-out:
- return ret_val;
+ return e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
}
/**
@@ -671,10 +669,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* get_link_status flag is set upon receiving a Link Status
* Change or Rx Sequence Error interrupt.
*/
- if (!mac->get_link_status) {
- ret_val = 0;
- goto out;
- }
+ if (!mac->get_link_status)
+ return 0;
/*
* First we want to see if the MII Status Register reports
@@ -683,16 +679,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
*/
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
if (ret_val)
- goto out;
+ return ret_val;
if (hw->mac.type == e1000_pchlan) {
ret_val = e1000_k1_gig_workaround_hv(hw, link);
if (ret_val)
- goto out;
+ return ret_val;
}
if (!link)
- goto out; /* No link detected */
+ return 0; /* No link detected */
mac->get_link_status = false;
@@ -700,13 +696,13 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
case e1000_pch2lan:
ret_val = e1000_k1_workaround_lv(hw);
if (ret_val)
- goto out;
+ return ret_val;
/* fall-thru */
case e1000_pchlan:
if (hw->phy.type == e1000_phy_82578) {
ret_val = e1000_link_stall_workaround_hv(hw);
if (ret_val)
- goto out;
+ return ret_val;
}
/*
@@ -736,23 +732,21 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
/* Enable/Disable EEE after link up */
ret_val = e1000_set_eee_pchlan(hw);
if (ret_val)
- goto out;
+ return ret_val;
/*
* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
*/
- if (!mac->autoneg) {
- ret_val = -E1000_ERR_CONFIG;
- goto out;
- }
+ if (!mac->autoneg)
+ return -E1000_ERR_CONFIG;
/*
* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
*/
- e1000e_config_collision_dist(hw);
+ mac->ops.config_collision_dist(hw);
/*
* Configure Flow Control now that Auto-Neg has completed.
@@ -764,7 +758,6 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
e_dbg("Error configuring flow control\n");
-out:
return ret_val;
}
@@ -773,7 +766,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
s32 rc;
- rc = e1000_init_mac_params_ich8lan(adapter);
+ rc = e1000_init_mac_params_ich8lan(hw);
if (rc)
return rc;
@@ -900,8 +893,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
}
if (!timeout) {
- e_dbg("Failed to acquire the semaphore, FW or HW has it: "
- "FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
+ e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
er32(FWSM), extcnf_ctrl);
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl);
@@ -1008,15 +1000,13 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
phy_data &= ~HV_SMB_ADDR_MASK;
phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
- ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
-out:
- return ret_val;
+ return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
}
/**
@@ -1065,7 +1055,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
data = er32(FEXTNVM);
if (!(data & sw_cfg_mask))
- goto out;
+ goto release;
/*
* Make sure HW does not configure LCD from PHY
@@ -1074,14 +1064,14 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
data = er32(EXTCNF_CTRL);
if (!(hw->mac.type == e1000_pch2lan)) {
if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
- goto out;
+ goto release;
}
cnf_size = er32(EXTCNF_SIZE);
cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
if (!cnf_size)
- goto out;
+ goto release;
cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
@@ -1097,13 +1087,13 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
*/
ret_val = e1000_write_smbus_addr(hw);
if (ret_val)
- goto out;
+ goto release;
data = er32(LEDCTL);
ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
(u16)data);
if (ret_val)
- goto out;
+ goto release;
}
/* Configure LCD from extended configuration region. */
@@ -1115,12 +1105,12 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
&reg_data);
if (ret_val)
- goto out;
+ goto release;
ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
1, &reg_addr);
if (ret_val)
- goto out;
+ goto release;
/* Save off the PHY page for future writes. */
if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
@@ -1134,10 +1124,10 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
reg_data);
if (ret_val)
- goto out;
+ goto release;
}
-out:
+release:
hw->phy.ops.release(hw);
return ret_val;
}
@@ -1159,12 +1149,12 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
if (hw->mac.type != e1000_pchlan)
- goto out;
+ return 0;
/* Wrap the whole flow with the sw flag */
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
- goto out;
+ return ret_val;
/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
if (link) {
@@ -1218,7 +1208,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
release:
hw->phy.ops.release(hw);
-out:
+
return ret_val;
}
@@ -1240,22 +1230,20 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
u32 reg = 0;
u16 kmrn_reg = 0;
- ret_val = e1000e_read_kmrn_reg_locked(hw,
- E1000_KMRNCTRLSTA_K1_CONFIG,
- &kmrn_reg);
+ ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+ &kmrn_reg);
if (ret_val)
- goto out;
+ return ret_val;
if (k1_enable)
kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
else
kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
- ret_val = e1000e_write_kmrn_reg_locked(hw,
- E1000_KMRNCTRLSTA_K1_CONFIG,
- kmrn_reg);
+ ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+ kmrn_reg);
if (ret_val)
- goto out;
+ return ret_val;
udelay(20);
ctrl_ext = er32(CTRL_EXT);
@@ -1273,8 +1261,7 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
e1e_flush();
udelay(20);
-out:
- return ret_val;
+ return 0;
}
/**
@@ -1302,18 +1289,18 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
if (!(hw->mac.type == e1000_pch2lan)) {
mac_reg = er32(EXTCNF_CTRL);
if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
- goto out;
+ goto release;
}
mac_reg = er32(FEXTNVM);
if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
- goto out;
+ goto release;
mac_reg = er32(PHY_CTRL);
ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
if (ret_val)
- goto out;
+ goto release;
oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
@@ -1325,7 +1312,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
oem_reg |= HV_OEM_BITS_LPLU;
/* Set Restart auto-neg to activate the bits */
- if (!e1000_check_reset_block(hw))
+ if (!hw->phy.ops.check_reset_block(hw))
oem_reg |= HV_OEM_BITS_RESTART_AN;
} else {
if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
@@ -1339,7 +1326,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
-out:
+release:
hw->phy.ops.release(hw);
return ret_val;
@@ -1376,13 +1363,13 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
u16 phy_data;
if (hw->mac.type != e1000_pchlan)
- return ret_val;
+ return 0;
/* Set MDIO slow mode before any other MDIO access */
if (hw->phy.type == e1000_phy_82577) {
ret_val = e1000_set_mdio_slow_mode_hv(hw);
if (ret_val)
- goto out;
+ return ret_val;
}
if (((hw->phy.type == e1000_phy_82577) &&
@@ -1419,7 +1406,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
hw->phy.ops.release(hw);
if (ret_val)
- goto out;
+ return ret_val;
/*
* Configure the K1 Si workaround during phy reset assuming there is
@@ -1427,12 +1414,12 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
*/
ret_val = e1000_k1_gig_workaround_hv(hw, true);
if (ret_val)
- goto out;
+ return ret_val;
/* Workaround for link disconnects on a busy hub in half duplex */
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
if (ret_val)
goto release;
@@ -1440,7 +1427,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
phy_data & 0x00FF);
release:
hw->phy.ops.release(hw);
-out:
+
return ret_val;
}
@@ -1497,13 +1484,13 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
u16 i;
if (hw->mac.type != e1000_pch2lan)
- goto out;
+ return 0;
/* disable Rx path while enabling/disabling workaround */
e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
if (ret_val)
- goto out;
+ return ret_val;
if (enable) {
/*
@@ -1545,24 +1532,24 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
E1000_KMRNCTRLSTA_CTRL_OFFSET,
&data);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = e1000e_write_kmrn_reg(hw,
E1000_KMRNCTRLSTA_CTRL_OFFSET,
data | (1 << 0));
if (ret_val)
- goto out;
+ return ret_val;
ret_val = e1000e_read_kmrn_reg(hw,
E1000_KMRNCTRLSTA_HD_CTRL,
&data);
if (ret_val)
- goto out;
+ return ret_val;
data &= ~(0xF << 8);
data |= (0xB << 8);
ret_val = e1000e_write_kmrn_reg(hw,
E1000_KMRNCTRLSTA_HD_CTRL,
data);
if (ret_val)
- goto out;
+ return ret_val;
/* Enable jumbo frame workaround in the PHY */
e1e_rphy(hw, PHY_REG(769, 23), &data);
@@ -1570,25 +1557,25 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
data |= (0x37 << 5);
ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
if (ret_val)
- goto out;
+ return ret_val;
e1e_rphy(hw, PHY_REG(769, 16), &data);
data &= ~(1 << 13);
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
if (ret_val)
- goto out;
+ return ret_val;
e1e_rphy(hw, PHY_REG(776, 20), &data);
data &= ~(0x3FF << 2);
data |= (0x1A << 2);
ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
if (ret_val)
- goto out;
+ return ret_val;
e1e_rphy(hw, HV_PM_CTRL, &data);
ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
if (ret_val)
- goto out;
+ return ret_val;
} else {
/* Write MAC register values back to h/w defaults */
mac_reg = er32(FFLT_DBG);
@@ -1603,56 +1590,53 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
E1000_KMRNCTRLSTA_CTRL_OFFSET,
&data);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = e1000e_write_kmrn_reg(hw,
E1000_KMRNCTRLSTA_CTRL_OFFSET,
data & ~(1 << 0));
if (ret_val)
- goto out;
+ return ret_val;
ret_val = e1000e_read_kmrn_reg(hw,
E1000_KMRNCTRLSTA_HD_CTRL,
&data);
if (ret_val)
- goto out;
+ return ret_val;
data &= ~(0xF << 8);
data |= (0xB << 8);
ret_val = e1000e_write_kmrn_reg(hw,
E1000_KMRNCTRLSTA_HD_CTRL,
data);
if (ret_val)
- goto out;
+ return ret_val;
/* Write PHY register values back to h/w defaults */
e1e_rphy(hw, PHY_REG(769, 23), &data);
data &= ~(0x7F << 5);
ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
if (ret_val)
- goto out;
+ return ret_val;
e1e_rphy(hw, PHY_REG(769, 16), &data);
data |= (1 << 13);
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
if (ret_val)
- goto out;
+ return ret_val;
e1e_rphy(hw, PHY_REG(776, 20), &data);
data &= ~(0x3FF << 2);
data |= (0x8 << 2);
ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
if (ret_val)
- goto out;
+ return ret_val;
e1e_rphy(hw, HV_PM_CTRL, &data);
ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
if (ret_val)
- goto out;
+ return ret_val;
}
/* re-enable Rx path after enabling/disabling workaround */
- ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
-
-out:
- return ret_val;
+ return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
}
/**
@@ -1664,12 +1648,31 @@ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
s32 ret_val = 0;
if (hw->mac.type != e1000_pch2lan)
- goto out;
+ return 0;
/* Set MDIO slow mode before any other MDIO access */
ret_val = e1000_set_mdio_slow_mode_hv(hw);
-out:
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
+ I82579_MSE_THRESHOLD);
+ if (ret_val)
+ goto release;
+ /* set MSE higher to enable link to stay up when noise is high */
+ ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0034);
+ if (ret_val)
+ goto release;
+ ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
+ I82579_MSE_LINK_DOWN);
+ if (ret_val)
+ goto release;
+ /* drop link after 5 times MSE threshold was reached */
+ ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0005);
+release:
+ hw->phy.ops.release(hw);
+
return ret_val;
}
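
The added block programs two I82579 tunables through the EMI address/data pair: the record number is written to I82579_EMI_ADDR and the value to I82579_EMI_DATA, all under the PHY semaphore taken with phy.ops.acquire(). The same two-step sequence shows up again later for the LPI update timer, so it factors naturally into a helper along these lines (hypothetical, not part of this patch):

/* Hypothetical helper condensing the EMI sequence used above: select the
 * target record, then write its value.  Must be called with the PHY
 * semaphore already acquired. */
static s32 sketch_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
	s32 ret_val;

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, addr);
	if (ret_val)
		return ret_val;

	return hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, data);
}

With such a helper the writes above become sketch_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034) and sketch_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005).
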
@@ -1687,12 +1690,12 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
u16 phy_reg;
if (hw->mac.type != e1000_pch2lan)
- goto out;
+ return 0;
/* Set K1 beacon duration based on 1Gbps speed or otherwise */
ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
if (ret_val)
- goto out;
+ return ret_val;
if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
== (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
@@ -1701,7 +1704,7 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
if (ret_val)
- goto out;
+ return ret_val;
if (status_reg & HV_M_STATUS_SPEED_1000) {
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
@@ -1714,7 +1717,6 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
}
-out:
return ret_val;
}
@@ -1741,7 +1743,6 @@ static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
ew32(EXTCNF_CTRL, extcnf_ctrl);
- return;
}
/**
@@ -1785,8 +1786,8 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
s32 ret_val = 0;
u16 reg;
- if (e1000_check_reset_block(hw))
- goto out;
+ if (hw->phy.ops.check_reset_block(hw))
+ return 0;
/* Allow time for h/w to get to quiescent state after reset */
usleep_range(10000, 20000);
@@ -1796,12 +1797,12 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
case e1000_pchlan:
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
if (ret_val)
- goto out;
+ return ret_val;
break;
case e1000_pch2lan:
ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
if (ret_val)
- goto out;
+ return ret_val;
break;
default:
break;
@@ -1817,7 +1818,7 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
/* Configure the LCD with the extended configuration region in NVM */
ret_val = e1000_sw_lcd_config_ich8lan(hw);
if (ret_val)
- goto out;
+ return ret_val;
/* Configure the LCD with the OEM bits in NVM */
ret_val = e1000_oem_bits_config_ich8lan(hw, true);
@@ -1832,18 +1833,16 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
/* Set EEE LPI Update Timer to 200usec */
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
I82579_LPI_UPDATE_TIMER);
- if (ret_val)
- goto release;
- ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
- 0x1387);
-release:
+ if (!ret_val)
+ ret_val = hw->phy.ops.write_reg_locked(hw,
+ I82579_EMI_DATA,
+ 0x1387);
hw->phy.ops.release(hw);
}
-out:
return ret_val;
}
@@ -1866,12 +1865,9 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
ret_val = e1000e_phy_hw_reset_generic(hw);
if (ret_val)
- goto out;
-
- ret_val = e1000_post_phy_reset_ich8lan(hw);
+ return ret_val;
-out:
- return ret_val;
+ return e1000_post_phy_reset_ich8lan(hw);
}
/**
@@ -1892,18 +1888,17 @@ static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
if (ret_val)
- goto out;
+ return ret_val;
if (active)
oem_reg |= HV_OEM_BITS_LPLU;
else
oem_reg &= ~HV_OEM_BITS_LPLU;
- oem_reg |= HV_OEM_BITS_RESTART_AN;
- ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg);
+ if (!hw->phy.ops.check_reset_block(hw))
+ oem_reg |= HV_OEM_BITS_RESTART_AN;
-out:
- return ret_val;
+ return e1e_wphy(hw, HV_OEM_BITS, oem_reg);
}
/**
@@ -1927,7 +1922,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
u16 data;
if (phy->type == e1000_phy_ife)
- return ret_val;
+ return 0;
phy_ctrl = er32(PHY_CTRL);
@@ -2009,7 +2004,7 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
u32 phy_ctrl;
- s32 ret_val;
+ s32 ret_val = 0;
u16 data;
phy_ctrl = er32(PHY_CTRL);
@@ -2075,7 +2070,7 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
}
- return 0;
+ return ret_val;
}
/**
@@ -2093,7 +2088,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
u8 sig_byte = 0;
- s32 ret_val = 0;
+ s32 ret_val;
switch (hw->mac.type) {
case e1000_ich8lan:
@@ -2108,8 +2103,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
return 0;
}
- e_dbg("Unable to determine valid NVM bank via EEC - "
- "reading flash signature\n");
+ e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
/* fall-thru */
default:
/* set bank to 0 in case flash read fails */
@@ -2141,8 +2135,6 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
e_dbg("ERROR: No valid NVM bank present\n");
return -E1000_ERR_NVM;
}
-
- return 0;
}
/**
@@ -2221,8 +2213,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
/* Check if the flash descriptor is valid */
if (hsfsts.hsf_status.fldesvalid == 0) {
- e_dbg("Flash descriptor invalid. "
- "SW Sequencing must be used.\n");
+ e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n");
return -E1000_ERR_NVM;
}
@@ -2251,21 +2242,21 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
ret_val = 0;
} else {
- s32 i = 0;
+ s32 i;
/*
* Otherwise poll for sometime so the current
* cycle has a chance to end before giving up.
*/
for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
- hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS);
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
if (hsfsts.hsf_status.flcinprog == 0) {
ret_val = 0;
break;
}
udelay(1);
}
- if (ret_val == 0) {
+ if (!ret_val) {
/*
* Successful in waiting for previous cycle to timeout,
* now set the Flash Cycle Done.
@@ -2291,7 +2282,6 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
{
union ich8_hws_flash_ctrl hsflctl;
union ich8_hws_flash_status hsfsts;
- s32 ret_val = -E1000_ERR_NVM;
u32 i = 0;
/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
@@ -2310,7 +2300,7 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
return 0;
- return ret_val;
+ return -E1000_ERR_NVM;
}
/**
@@ -2383,7 +2373,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
udelay(1);
/* Steps */
ret_val = e1000_flash_cycle_init_ich8lan(hw);
- if (ret_val != 0)
+ if (ret_val)
break;
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
@@ -2403,7 +2393,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
* read in (shift in) the Flash Data0, the order is
* least significant byte first msb to lsb
*/
- if (ret_val == 0) {
+ if (!ret_val) {
flash_data = er32flash(ICH_FLASH_FDATA0);
if (size == 1)
*data = (u8)(flash_data & 0x000000FF);
@@ -2422,8 +2412,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
/* Repeat for some time before giving up. */
continue;
} else if (hsfsts.hsf_status.flcdone == 0) {
- e_dbg("Timeout error - flash cycle "
- "did not complete.\n");
+ e_dbg("Timeout error - flash cycle did not complete.\n");
break;
}
}
@@ -2618,7 +2607,7 @@ release:
* until after the next adapter reset.
*/
if (!ret_val) {
- e1000e_reload_nvm(hw);
+ nvm->ops.reload(hw);
usleep_range(10000, 20000);
}
@@ -2774,8 +2763,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
/* Repeat for some time before giving up. */
continue;
if (hsfsts.hsf_status.flcdone == 0) {
- e_dbg("Timeout error - flash cycle "
- "did not complete.");
+ e_dbg("Timeout error - flash cycle did not complete.\n");
break;
}
} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
@@ -2917,7 +2905,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
ret_val = e1000_flash_cycle_ich8lan(hw,
ICH_FLASH_ERASE_COMMAND_TIMEOUT);
- if (ret_val == 0)
+ if (!ret_val)
break;
/*
@@ -2972,7 +2960,7 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
*
* PCH also does not have an "always on" or "always off" mode which
* complicates the ID feature. Instead of using the "on" mode to indicate
- * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init()),
+ * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()),
* use "link_up" mode. The LEDs will still ID on request if there is no
* link based on logic in e1000_led_[on|off]_pchlan().
**/
@@ -2987,7 +2975,7 @@ static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
/* Get default ID LED modes */
ret_val = hw->nvm.ops.valid_led_default(hw, &data);
if (ret_val)
- goto out;
+ return ret_val;
mac->ledctl_default = er32(LEDCTL);
mac->ledctl_mode1 = mac->ledctl_default;
@@ -3032,8 +3020,7 @@ static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
}
}
-out:
- return ret_val;
+ return 0;
}
/**
@@ -3120,7 +3107,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ctrl = er32(CTRL);
- if (!e1000_check_reset_block(hw)) {
+ if (!hw->phy.ops.check_reset_block(hw)) {
/*
* Full-chip reset requires MAC and PHY reset at the same
* time to make sure the interface between MAC and the
@@ -3148,11 +3135,11 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
if (ctrl & E1000_CTRL_PHY_RST) {
ret_val = hw->phy.ops.get_cfg_done(hw);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = e1000_post_phy_reset_ich8lan(hw);
if (ret_val)
- goto out;
+ return ret_val;
}
/*
@@ -3170,8 +3157,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
kab |= E1000_KABGTXD_BGSQLBIAS;
ew32(KABGTXD, kab);
-out:
- return ret_val;
+ return 0;
}
/**
@@ -3224,7 +3210,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
}
/* Setup link and flow control */
- ret_val = e1000_setup_link_ich8lan(hw);
+ ret_val = mac->ops.setup_link(hw);
/* Set the transmit descriptor write-back policy for both queues */
txdctl = er32(TXDCTL(0));
@@ -3262,7 +3248,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
*/
e1000_clear_hw_cntrs_ich8lan(hw);
- return 0;
+ return ret_val;
}
/**
* e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
@@ -3339,7 +3325,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
{
s32 ret_val;
- if (e1000_check_reset_block(hw))
+ if (hw->phy.ops.check_reset_block(hw))
return 0;
/*
@@ -3365,7 +3351,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
hw->fc.current_mode);
/* Continue to configure the copper link. */
- ret_val = e1000_setup_copper_link_ich8lan(hw);
+ ret_val = hw->mac.ops.setup_physical_interface(hw);
if (ret_val)
return ret_val;
@@ -3465,6 +3451,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
default:
break;
}
+
return e1000e_setup_copper_link(hw);
}
@@ -3566,7 +3553,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
}
/**
- * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
+ * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
* @hw: pointer to the HW structure
* @state: boolean value used to set the current Kumeran workaround state
*
@@ -3676,9 +3663,10 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
*
* During S0 to Sx transition, it is possible the link remains at gig
* instead of negotiating to a lower speed. Before going to Sx, set
- * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
- * to a lower speed. For PCH and newer parts, the OEM bits PHY register
- * (LED, GbE disable and LPLU configurations) also needs to be written.
+ * 'Gig Disable' to force link speed negotiation to a lower speed based on
+ * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
+ * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
+ * needs to be written.
**/
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
{
@@ -3686,7 +3674,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
s32 ret_val;
phy_ctrl = er32(PHY_CTRL);
- phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
+ phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
ew32(PHY_CTRL, phy_ctrl);
if (hw->mac.type == e1000_ich8lan)
@@ -3714,47 +3702,41 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
**/
void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
{
- u32 fwsm;
+ u16 phy_id1, phy_id2;
+ s32 ret_val;
- if (hw->mac.type != e1000_pch2lan)
+ if ((hw->mac.type != e1000_pch2lan) ||
+ hw->phy.ops.check_reset_block(hw))
return;
- fwsm = er32(FWSM);
- if (!(fwsm & E1000_ICH_FWSM_FW_VALID) || !e1000_check_reset_block(hw)) {
- u16 phy_id1, phy_id2;
- s32 ret_val;
-
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val) {
- e_dbg("Failed to acquire PHY semaphore in resume\n");
- return;
- }
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ e_dbg("Failed to acquire PHY semaphore in resume\n");
+ return;
+ }
- /* Test access to the PHY registers by reading the ID regs */
- ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
- if (ret_val)
- goto release;
- ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
- if (ret_val)
- goto release;
+ /* Test access to the PHY registers by reading the ID regs */
+ ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
+ if (ret_val)
+ goto release;
+ ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
+ if (ret_val)
+ goto release;
- if (hw->phy.id == ((u32)(phy_id1 << 16) |
- (u32)(phy_id2 & PHY_REVISION_MASK)))
- goto release;
+ if (hw->phy.id == ((u32)(phy_id1 << 16) |
+ (u32)(phy_id2 & PHY_REVISION_MASK)))
+ goto release;
- e1000_toggle_lanphypc_value_ich8lan(hw);
+ e1000_toggle_lanphypc_value_ich8lan(hw);
- hw->phy.ops.release(hw);
- msleep(50);
- e1000_phy_hw_reset(hw);
- msleep(50);
- return;
- }
+ hw->phy.ops.release(hw);
+ msleep(50);
+ e1000_phy_hw_reset(hw);
+ msleep(50);
+ return;
release:
hw->phy.ops.release(hw);
-
- return;
}
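
The rewritten resume path checks that the PHY responds by reading the two ID registers and comparing the combined value with the cached hw->phy.id: PHY_ID1 supplies the upper 16 bits and PHY_ID2 the lower 16 with the revision nibble masked off, and only on a mismatch are the LANPHYPC toggle and PHY reset performed. The combination step, as a standalone check with illustrative register values:

#include <stdio.h>

#define PHY_REVISION_MASK	0xFFFFFFF0u	/* low nibble is the revision */

int main(void)
{
	/* illustrative register values, not a real PHY ID */
	unsigned int phy_id1 = 0x1234, phy_id2 = 0x5679;
	unsigned int id = (phy_id1 << 16) | (phy_id2 & PHY_REVISION_MASK);

	printf("combined id = 0x%08x\n", id);	/* 0x12345670 */
	return 0;
}
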
/**
@@ -4023,7 +4005,6 @@ release:
}
static const struct e1000_mac_operations ich8_mac_ops = {
- .id_led_init = e1000e_id_led_init,
/* check_mng_mode dependent on mac type */
.check_for_link = e1000_check_for_copper_link_ich8lan,
/* cleanup_led dependent on mac type */
@@ -4039,6 +4020,7 @@ static const struct e1000_mac_operations ich8_mac_ops = {
.setup_link = e1000_setup_link_ich8lan,
.setup_physical_interface= e1000_setup_copper_link_ich8lan,
/* id_led_init dependent on mac type */
+ .config_collision_dist = e1000e_config_collision_dist_generic,
};
static const struct e1000_phy_operations ich8_phy_ops = {
@@ -4059,6 +4041,7 @@ static const struct e1000_nvm_operations ich8_nvm_ops = {
.acquire = e1000_acquire_nvm_ich8lan,
.read = e1000_read_nvm_ich8lan,
.release = e1000_release_nvm_ich8lan,
+ .reload = e1000e_reload_nvm_generic,
.update = e1000_update_nvm_checksum_ich8lan,
.valid_led_default = e1000_valid_led_default_ich8lan,
.validate = e1000_validate_nvm_checksum_ich8lan,
@@ -4088,10 +4071,9 @@ const struct e1000_info e1000_ich9_info = {
| FLAG_HAS_WOL
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_AMT
- | FLAG_HAS_ERT
| FLAG_HAS_FLASH
| FLAG_APME_IN_WUC,
- .pba = 10,
+ .pba = 18,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
@@ -4106,10 +4088,9 @@ const struct e1000_info e1000_ich10_info = {
| FLAG_HAS_WOL
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_AMT
- | FLAG_HAS_ERT
| FLAG_HAS_FLASH
| FLAG_APME_IN_WUC,
- .pba = 10,
+ .pba = 18,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
diff --git a/drivers/net/ethernet/intel/e1000e/lib.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 0893ab107adf..decad98c1059 100644
--- a/drivers/net/ethernet/intel/e1000e/lib.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -28,19 +28,6 @@
#include "e1000.h"
-enum e1000_mng_mode {
- e1000_mng_mode_none = 0,
- e1000_mng_mode_asf,
- e1000_mng_mode_pt,
- e1000_mng_mode_ipmi,
- e1000_mng_mode_host_if_only
-};
-
-#define E1000_FACTPS_MNGCG 0x20000000
-
-/* Intel(R) Active Management Technology signature */
-#define E1000_IAMT_SIGNATURE 0x544D4149
-
/**
* e1000e_get_bus_info_pcie - Get PCIe bus information
* @hw: pointer to the HW structure
@@ -151,7 +138,7 @@ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
u32 i;
- u8 mac_addr[ETH_ALEN] = {0};
+ u8 mac_addr[ETH_ALEN] = { 0 };
/* Setup the receive address */
e_dbg("Programming MAC Address into RAR[0]\n");
@@ -159,7 +146,7 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
e1000e_rar_set(hw, hw->mac.addr, 0);
/* Zero out the other (rar_entry_count - 1) receive addresses */
- e_dbg("Clearing RAR[1-%u]\n", rar_count-1);
+ e_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
for (i = 1; i < rar_count; i++)
e1000e_rar_set(hw, mac_addr, i);
}
@@ -185,26 +172,23 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
if (ret_val)
- goto out;
+ return ret_val;
- /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
- if (!((nvm_data & NVM_COMPAT_LOM) ||
- (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
- (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) ||
- (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES)))
- goto out;
+ /* not supported on 82573 */
+ if (hw->mac.type == e1000_82573)
+ return 0;
ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
- &nvm_alt_mac_addr_offset);
+ &nvm_alt_mac_addr_offset);
if (ret_val) {
e_dbg("NVM Read Error\n");
- goto out;
+ return ret_val;
}
if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
(nvm_alt_mac_addr_offset == 0x0000))
/* There is no Alternate MAC Address */
- goto out;
+ return 0;
if (hw->bus.func == E1000_FUNC_1)
nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
@@ -213,7 +197,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
if (ret_val) {
e_dbg("NVM Read Error\n");
- goto out;
+ return ret_val;
}
alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
@@ -223,7 +207,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
/* if multicast bit is set, the alternate address will not be used */
if (is_multicast_ether_addr(alt_mac_addr)) {
e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
- goto out;
+ return 0;
}
/*
@@ -233,8 +217,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
*/
e1000e_rar_set(hw, alt_mac_addr, 0);
-out:
- return ret_val;
+ return 0;
}
/**
@@ -254,11 +237,10 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
*/
- rar_low = ((u32) addr[0] |
- ((u32) addr[1] << 8) |
- ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+ rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
- rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+ rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
/* If MAC address zero, no need to set the AV bit */
if (rar_low || rar_high)
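
e1000e_rar_set() packs the six address bytes into the RAL/RAH register pair low byte first. A tiny standalone check with an illustrative MAC address (not taken from the patch):

#include <stdio.h>

int main(void)
{
	/* illustrative address, not taken from the patch */
	unsigned char addr[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };
	unsigned int rar_low, rar_high;

	rar_low = (unsigned int)addr[0] | ((unsigned int)addr[1] << 8) |
		  ((unsigned int)addr[2] << 16) | ((unsigned int)addr[3] << 24);
	rar_high = (unsigned int)addr[4] | ((unsigned int)addr[5] << 8);

	/* prints 0x3c211b00 0x00005e4d: the MAC lands little-endian
	 * across the RAL/RAH pair, as the comment above describes */
	printf("0x%08x 0x%08x\n", rar_low, rar_high);
	return 0;
}
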
@@ -281,8 +263,7 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
* @mc_addr: pointer to a multicast address
*
* Generates a multicast address hash value which is used to determine
- * the multicast filter table array address and new table value. See
- * e1000_mta_set_generic()
+ * the multicast filter table array address and new table value.
**/
static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
@@ -318,7 +299,7 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
* values resulting from each mc_filter_type...
* [0] [1] [2] [3] [4] [5]
* 01 AA 00 12 34 56
- * LSB MSB
+ * LSB MSB
*
* case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
* case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
@@ -341,7 +322,7 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
}
hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
- (((u16) mc_addr[5]) << bit_shift)));
+ (((u16)mc_addr[5]) << bit_shift)));
return hash_value;
}
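
The hash feeding the multicast table is built from the top bits of the last two address bytes; mc_filter_type just decides how far byte 5 is shifted before masking. A standalone check that reproduces the two example values documented above for 01:AA:00:12:34:56:

#include <stdio.h>

/* Reproduces the 'case 0' and 'case 1' hash values quoted in the comment
 * above (0x563 and 0xAC6) for the 0xFFF mask used in that example.
 * Filter types 2 and 3 shift further and are omitted here. */
static unsigned int mc_hash_sketch(const unsigned char *mc, int filter_type)
{
	unsigned int hash_mask = 0xFFF;		/* 4096-bin example */
	unsigned int bit_shift = 0;

	while (hash_mask >> bit_shift != 0xFF)	/* gives 4 for a 0xFFF mask */
		bit_shift++;
	bit_shift += filter_type;		/* type 0: +0, type 1: +1 */

	return hash_mask & ((mc[4] >> (8 - bit_shift)) |
			    ((unsigned int)mc[5] << bit_shift));
}

int main(void)
{
	const unsigned char mc[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };

	printf("type 0: 0x%03x\n", mc_hash_sketch(mc, 0));	/* 0x563 */
	printf("type 1: 0x%03x\n", mc_hash_sketch(mc, 1));	/* 0xAC6 */
	return 0;
}
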
@@ -365,7 +346,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
/* update mta_shadow from mc_addr_list */
- for (i = 0; (u32) i < mc_addr_count; i++) {
+ for (i = 0; (u32)i < mc_addr_count; i++) {
hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
@@ -461,7 +442,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
return ret_val;
if (!link)
- return ret_val; /* No link detected */
+ return 0; /* No link detected */
mac->get_link_status = false;
@@ -475,17 +456,15 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
*/
- if (!mac->autoneg) {
- ret_val = -E1000_ERR_CONFIG;
- return ret_val;
- }
+ if (!mac->autoneg)
+ return -E1000_ERR_CONFIG;
/*
* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
*/
- e1000e_config_collision_dist(hw);
+ mac->ops.config_collision_dist(hw);
/*
* Configure Flow Control now that Auto-Neg has completed.
@@ -528,10 +507,10 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
* was just plugged in. The autoneg_failed flag does this.
*/
/* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
- if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
- (!(rxcw & E1000_RXCW_C))) {
- if (mac->autoneg_failed == 0) {
- mac->autoneg_failed = 1;
+ if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) &&
+ !(rxcw & E1000_RXCW_C)) {
+ if (!mac->autoneg_failed) {
+ mac->autoneg_failed = true;
return 0;
}
e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
@@ -594,9 +573,9 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
* time to complete.
*/
/* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
- if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
- if (mac->autoneg_failed == 0) {
- mac->autoneg_failed = 1;
+ if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) {
+ if (!mac->autoneg_failed) {
+ mac->autoneg_failed = true;
return 0;
}
e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
@@ -650,18 +629,16 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
if (E1000_TXCW_ANE & er32(TXCW)) {
status = er32(STATUS);
if (status & E1000_STATUS_LU) {
- /* SYNCH bit and IV bit are sticky, so reread rxcw. */
+ /* SYNCH bit and IV bit are sticky, so reread rxcw. */
udelay(10);
rxcw = er32(RXCW);
if (rxcw & E1000_RXCW_SYNCH) {
if (!(rxcw & E1000_RXCW_IV)) {
mac->serdes_has_link = true;
- e_dbg("SERDES: Link up - autoneg "
- "completed successfully.\n");
+ e_dbg("SERDES: Link up - autoneg completed successfully.\n");
} else {
mac->serdes_has_link = false;
- e_dbg("SERDES: Link down - invalid"
- "codewords detected in autoneg.\n");
+ e_dbg("SERDES: Link down - invalid codewords detected in autoneg.\n");
}
} else {
mac->serdes_has_link = false;
@@ -706,8 +683,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
hw->fc.requested_mode = e1000_fc_none;
- else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
- NVM_WORD0F_ASM_DIR)
+ else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
hw->fc.requested_mode = e1000_fc_tx_pause;
else
hw->fc.requested_mode = e1000_fc_full;
@@ -716,7 +692,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
}
/**
- * e1000e_setup_link - Setup flow control and link settings
+ * e1000e_setup_link_generic - Setup flow control and link settings
* @hw: pointer to the HW structure
*
* Determines which flow control settings to use, then configures flow
@@ -725,16 +701,15 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
* should be established. Assumes the hardware has previously been reset
* and the transmitter and receiver are not enabled.
**/
-s32 e1000e_setup_link(struct e1000_hw *hw)
+s32 e1000e_setup_link_generic(struct e1000_hw *hw)
{
- struct e1000_mac_info *mac = &hw->mac;
s32 ret_val;
/*
* In the case of the phy reset being blocked, we already have a link.
* We do not need to set it up again.
*/
- if (e1000_check_reset_block(hw))
+ if (hw->phy.ops.check_reset_block(hw))
return 0;
/*
@@ -753,11 +728,10 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
*/
hw->fc.current_mode = hw->fc.requested_mode;
- e_dbg("After fix-ups FlowControl is now = %x\n",
- hw->fc.current_mode);
+ e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
/* Call the necessary media_type subroutine to configure the link. */
- ret_val = mac->ops.setup_physical_interface(hw);
+ ret_val = hw->mac.ops.setup_physical_interface(hw);
if (ret_val)
return ret_val;
@@ -876,7 +850,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
}
if (i == FIBER_LINK_UP_LIMIT) {
e_dbg("Never got a valid link from auto-neg!!!\n");
- mac->autoneg_failed = 1;
+ mac->autoneg_failed = true;
/*
* AutoNeg failed to achieve a link, so we'll call
* mac->check_for_link. This routine will force the
@@ -888,9 +862,9 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
e_dbg("Error while checking for link\n");
return ret_val;
}
- mac->autoneg_failed = 0;
+ mac->autoneg_failed = false;
} else {
- mac->autoneg_failed = 0;
+ mac->autoneg_failed = false;
e_dbg("Valid Link Found\n");
}
@@ -914,7 +888,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
/* Take the link out of reset */
ctrl &= ~E1000_CTRL_LRST;
- e1000e_config_collision_dist(hw);
+ hw->mac.ops.config_collision_dist(hw);
ret_val = e1000_commit_fc_settings_generic(hw);
if (ret_val)
@@ -945,18 +919,17 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
e_dbg("No signal detected\n");
}
- return 0;
+ return ret_val;
}
/**
- * e1000e_config_collision_dist - Configure collision distance
+ * e1000e_config_collision_dist_generic - Configure collision distance
* @hw: pointer to the HW structure
*
* Configures the collision distance to the default value and is used
- * during link setup. Currently no func pointer exists and all
- * implementations are handled in the generic version of this function.
+ * during link setup.
**/
-void e1000e_config_collision_dist(struct e1000_hw *hw)
+void e1000e_config_collision_dist_generic(struct e1000_hw *hw)
{
u32 tctl;
@@ -995,7 +968,9 @@ s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
* XON frames.
*/
fcrtl = hw->fc.low_water;
- fcrtl |= E1000_FCRTL_XONE;
+ if (hw->fc.send_xon)
+ fcrtl |= E1000_FCRTL_XONE;
+
fcrth = hw->fc.high_water;
}
ew32(FCRTL, fcrtl);
@@ -1121,8 +1096,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
return ret_val;
if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
- e_dbg("Copper PHY and Auto Neg "
- "has not completed.\n");
+ e_dbg("Copper PHY and Auto Neg has not completed.\n");
return ret_val;
}
@@ -1186,11 +1160,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
*/
if (hw->fc.requested_mode == e1000_fc_full) {
hw->fc.current_mode = e1000_fc_full;
- e_dbg("Flow Control = FULL.\r\n");
+ e_dbg("Flow Control = FULL.\n");
} else {
hw->fc.current_mode = e1000_fc_rx_pause;
- e_dbg("Flow Control = "
- "Rx PAUSE frames only.\r\n");
+ e_dbg("Flow Control = Rx PAUSE frames only.\n");
}
}
/*
@@ -1202,11 +1175,11 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
* 0 | 1 | 1 | 1 | e1000_fc_tx_pause
*/
else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc.current_mode = e1000_fc_tx_pause;
- e_dbg("Flow Control = Tx PAUSE frames only.\r\n");
+ e_dbg("Flow Control = Tx PAUSE frames only.\n");
}
/*
* For transmitting PAUSE frames ONLY.
@@ -1221,14 +1194,14 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
!(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc.current_mode = e1000_fc_rx_pause;
- e_dbg("Flow Control = Rx PAUSE frames only.\r\n");
+ e_dbg("Flow Control = Rx PAUSE frames only.\n");
} else {
/*
* Per the IEEE spec, at this point flow control
* should be disabled.
*/
hw->fc.current_mode = e1000_fc_none;
- e_dbg("Flow Control = NONE.\r\n");
+ e_dbg("Flow Control = NONE.\n");
}
/*
@@ -1268,7 +1241,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
* Read the status register for the current speed/duplex and store the current
* speed and duplex for copper connections.
**/
-s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
{
u32 status;
@@ -1301,7 +1275,8 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup
* Sets the speed and duplex to gigabit full duplex (the only possible option)
* for fiber/serdes links.
**/
-s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
{
*speed = SPEED_1000;
*duplex = FULL_DUPLEX;
@@ -1423,11 +1398,11 @@ s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
}
/**
- * e1000e_id_led_init -
+ * e1000e_id_led_init_generic -
* @hw: pointer to the HW structure
*
**/
-s32 e1000e_id_led_init(struct e1000_hw *hw)
+s32 e1000e_id_led_init_generic(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val;
@@ -1504,11 +1479,10 @@ s32 e1000e_setup_led_generic(struct e1000_hw *hw)
ledctl = er32(LEDCTL);
hw->mac.ledctl_default = ledctl;
/* Turn off LED0 */
- ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
- E1000_LEDCTL_LED0_BLINK |
- E1000_LEDCTL_LED0_MODE_MASK);
+ ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_LED0_MODE_MASK);
ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
- E1000_LEDCTL_LED0_MODE_SHIFT);
+ E1000_LEDCTL_LED0_MODE_SHIFT);
ew32(LEDCTL, ledctl);
} else if (hw->phy.media_type == e1000_media_type_copper) {
ew32(LEDCTL, hw->mac.ledctl_mode1);
@@ -1544,7 +1518,7 @@ s32 e1000e_blink_led_generic(struct e1000_hw *hw)
if (hw->phy.media_type == e1000_media_type_fiber) {
/* always blink LED0 for PCI-E fiber */
ledctl_blink = E1000_LEDCTL_LED0_BLINK |
- (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+ (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
} else {
/*
* set the blink bit for each LED that's "on" (0x0E)
@@ -1657,8 +1631,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
ew32(CTRL, ctrl);
while (timeout) {
- if (!(er32(STATUS) &
- E1000_STATUS_GIO_MASTER_ENABLE))
+ if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
break;
udelay(100);
timeout--;
@@ -1684,7 +1657,7 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
if (!mac->adaptive_ifs) {
e_dbg("Not in Adaptive IFS mode!\n");
- goto out;
+ return;
}
mac->current_ifs_val = 0;
@@ -1695,8 +1668,6 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
mac->in_ifs_mode = false;
ew32(AIT, 0);
-out:
- return;
}
/**
@@ -1712,7 +1683,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
if (!mac->adaptive_ifs) {
e_dbg("Not in Adaptive IFS mode!\n");
- goto out;
+ return;
}
if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
@@ -1723,7 +1694,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
mac->current_ifs_val = mac->ifs_min_val;
else
mac->current_ifs_val +=
- mac->ifs_step_size;
+ mac->ifs_step_size;
ew32(AIT, mac->current_ifs_val);
}
}
@@ -1735,959 +1706,4 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
ew32(AIT, 0);
}
}
-out:
- return;
-}
-
-/**
- * e1000_raise_eec_clk - Raise EEPROM clock
- * @hw: pointer to the HW structure
- * @eecd: pointer to the EEPROM
- *
- * Enable/Raise the EEPROM clock bit.
- **/
-static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
-{
- *eecd = *eecd | E1000_EECD_SK;
- ew32(EECD, *eecd);
- e1e_flush();
- udelay(hw->nvm.delay_usec);
-}
-
-/**
- * e1000_lower_eec_clk - Lower EEPROM clock
- * @hw: pointer to the HW structure
- * @eecd: pointer to the EEPROM
- *
- * Clear/Lower the EEPROM clock bit.
- **/
-static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
-{
- *eecd = *eecd & ~E1000_EECD_SK;
- ew32(EECD, *eecd);
- e1e_flush();
- udelay(hw->nvm.delay_usec);
-}
-
-/**
- * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
- * @hw: pointer to the HW structure
- * @data: data to send to the EEPROM
- * @count: number of bits to shift out
- *
- * We need to shift 'count' bits out to the EEPROM. So, the value in the
- * "data" parameter will be shifted out to the EEPROM one bit at a time.
- * In order to do this, "data" must be broken down into bits.
- **/
-static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
-{
- struct e1000_nvm_info *nvm = &hw->nvm;
- u32 eecd = er32(EECD);
- u32 mask;
-
- mask = 0x01 << (count - 1);
- if (nvm->type == e1000_nvm_eeprom_spi)
- eecd |= E1000_EECD_DO;
-
- do {
- eecd &= ~E1000_EECD_DI;
-
- if (data & mask)
- eecd |= E1000_EECD_DI;
-
- ew32(EECD, eecd);
- e1e_flush();
-
- udelay(nvm->delay_usec);
-
- e1000_raise_eec_clk(hw, &eecd);
- e1000_lower_eec_clk(hw, &eecd);
-
- mask >>= 1;
- } while (mask);
-
- eecd &= ~E1000_EECD_DI;
- ew32(EECD, eecd);
-}
-
-/**
- * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
- * @hw: pointer to the HW structure
- * @count: number of bits to shift in
- *
- * In order to read a register from the EEPROM, we need to shift 'count' bits
- * in from the EEPROM. Bits are "shifted in" by raising the clock input to
- * the EEPROM (setting the SK bit), and then reading the value of the data out
- * "DO" bit. During this "shifting in" process the data in "DI" bit should
- * always be clear.
- **/
-static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
-{
- u32 eecd;
- u32 i;
- u16 data;
-
- eecd = er32(EECD);
-
- eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
- data = 0;
-
- for (i = 0; i < count; i++) {
- data <<= 1;
- e1000_raise_eec_clk(hw, &eecd);
-
- eecd = er32(EECD);
-
- eecd &= ~E1000_EECD_DI;
- if (eecd & E1000_EECD_DO)
- data |= 1;
-
- e1000_lower_eec_clk(hw, &eecd);
- }
-
- return data;
-}
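
The raise/lower/shift helpers removed from mac.c here implement a classic bit-banged serial EEPROM interface: the data bit is placed on DI, MSB first, and latched by toggling the SK clock. Below is a minimal user-space sketch of the shift-out half, with the EECD register replaced by a plain variable and the EEPROM replaced by a simulated shift register; the bit positions are assumptions for illustration only.

#include <stdio.h>
#include <stdint.h>

#define EECD_SK 0x01 /* clock (assumed bit position) */
#define EECD_DI 0x02 /* data in (assumed bit position) */

static uint32_t eecd;      /* simulated EECD register */
static uint16_t dev_shift; /* simulated EEPROM shift register */

/* "Hardware": on every rising clock edge the device samples DI and
 * shifts the sampled bit into its internal register. */
static void raise_clk(void)
{
	eecd |= EECD_SK;
	dev_shift = (uint16_t)((dev_shift << 1) | ((eecd & EECD_DI) ? 1 : 0));
}

static void lower_clk(void)
{
	eecd &= ~EECD_SK;
}

/* Shift 'count' bits out, MSB first, like e1000_shift_out_eec_bits(). */
static void shift_out_bits(uint16_t data, int count)
{
	uint16_t mask = (uint16_t)(1u << (count - 1));

	do {
		if (data & mask)
			eecd |= EECD_DI;
		else
			eecd &= ~EECD_DI;
		raise_clk();
		lower_clk();
		mask >>= 1;
	} while (mask);
}

int main(void)
{
	shift_out_bits(0xBEEF, 16);
	printf("device shift register now holds 0x%04X\n",
	       (unsigned int)dev_shift);
	return 0;
}
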
-
-/**
- * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
- * @hw: pointer to the HW structure
- * @ee_reg: EEPROM flag for polling
- *
- * Polls the EEPROM status bit for either read or write completion based
- * upon the value of 'ee_reg'.
- **/
-s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
-{
- u32 attempts = 100000;
- u32 i, reg = 0;
-
- for (i = 0; i < attempts; i++) {
- if (ee_reg == E1000_NVM_POLL_READ)
- reg = er32(EERD);
- else
- reg = er32(EEWR);
-
- if (reg & E1000_NVM_RW_REG_DONE)
- return 0;
-
- udelay(5);
- }
-
- return -E1000_ERR_NVM;
-}
-
-/**
- * e1000e_acquire_nvm - Generic request for access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Set the EEPROM access request bit and wait for EEPROM access grant bit.
- * Return successful if access grant bit set, else clear the request for
- * EEPROM access and return -E1000_ERR_NVM (-1).
- **/
-s32 e1000e_acquire_nvm(struct e1000_hw *hw)
-{
- u32 eecd = er32(EECD);
- s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
-
- ew32(EECD, eecd | E1000_EECD_REQ);
- eecd = er32(EECD);
-
- while (timeout) {
- if (eecd & E1000_EECD_GNT)
- break;
- udelay(5);
- eecd = er32(EECD);
- timeout--;
- }
-
- if (!timeout) {
- eecd &= ~E1000_EECD_REQ;
- ew32(EECD, eecd);
- e_dbg("Could not acquire NVM grant\n");
- return -E1000_ERR_NVM;
- }
-
- return 0;
-}
-
-/**
- * e1000_standby_nvm - Return EEPROM to standby state
- * @hw: pointer to the HW structure
- *
- * Return the EEPROM to a standby state.
- **/
-static void e1000_standby_nvm(struct e1000_hw *hw)
-{
- struct e1000_nvm_info *nvm = &hw->nvm;
- u32 eecd = er32(EECD);
-
- if (nvm->type == e1000_nvm_eeprom_spi) {
- /* Toggle CS to flush commands */
- eecd |= E1000_EECD_CS;
- ew32(EECD, eecd);
- e1e_flush();
- udelay(nvm->delay_usec);
- eecd &= ~E1000_EECD_CS;
- ew32(EECD, eecd);
- e1e_flush();
- udelay(nvm->delay_usec);
- }
-}
-
-/**
- * e1000_stop_nvm - Terminate EEPROM command
- * @hw: pointer to the HW structure
- *
- * Terminates the current command by inverting the EEPROM's chip select pin.
- **/
-static void e1000_stop_nvm(struct e1000_hw *hw)
-{
- u32 eecd;
-
- eecd = er32(EECD);
- if (hw->nvm.type == e1000_nvm_eeprom_spi) {
- /* Pull CS high */
- eecd |= E1000_EECD_CS;
- e1000_lower_eec_clk(hw, &eecd);
- }
-}
-
-/**
- * e1000e_release_nvm - Release exclusive access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Stop any current commands to the EEPROM and clear the EEPROM request bit.
- **/
-void e1000e_release_nvm(struct e1000_hw *hw)
-{
- u32 eecd;
-
- e1000_stop_nvm(hw);
-
- eecd = er32(EECD);
- eecd &= ~E1000_EECD_REQ;
- ew32(EECD, eecd);
-}
-
-/**
- * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
- * @hw: pointer to the HW structure
- *
- * Setups the EEPROM for reading and writing.
- **/
-static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
-{
- struct e1000_nvm_info *nvm = &hw->nvm;
- u32 eecd = er32(EECD);
- u8 spi_stat_reg;
-
- if (nvm->type == e1000_nvm_eeprom_spi) {
- u16 timeout = NVM_MAX_RETRY_SPI;
-
- /* Clear SK and CS */
- eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
- ew32(EECD, eecd);
- e1e_flush();
- udelay(1);
-
- /*
- * Read "Status Register" repeatedly until the LSB is cleared.
- * The EEPROM will signal that the command has been completed
- * by clearing bit 0 of the internal status register. If it's
- * not cleared within 'timeout', then error out.
- */
- while (timeout) {
- e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
- hw->nvm.opcode_bits);
- spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
- if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
- break;
-
- udelay(5);
- e1000_standby_nvm(hw);
- timeout--;
- }
-
- if (!timeout) {
- e_dbg("SPI NVM Status error\n");
- return -E1000_ERR_NVM;
- }
- }
-
- return 0;
-}
-
-/**
- * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
- * @hw: pointer to the HW structure
- * @offset: offset of word in the EEPROM to read
- * @words: number of words to read
- * @data: word read from the EEPROM
- *
- * Reads a 16 bit word from the EEPROM using the EERD register.
- **/
-s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
- struct e1000_nvm_info *nvm = &hw->nvm;
- u32 i, eerd = 0;
- s32 ret_val = 0;
-
- /*
- * A check for invalid values: offset too large, too many words,
- * too many words for the offset, and not enough words.
- */
- if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
- (words == 0)) {
- e_dbg("nvm parameter(s) out of bounds\n");
- return -E1000_ERR_NVM;
- }
-
- for (i = 0; i < words; i++) {
- eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
- E1000_NVM_RW_REG_START;
-
- ew32(EERD, eerd);
- ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
- if (ret_val)
- break;
-
- data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
- }
-
- return ret_val;
-}
-
-/**
- * e1000e_write_nvm_spi - Write to EEPROM using SPI
- * @hw: pointer to the HW structure
- * @offset: offset within the EEPROM to be written to
- * @words: number of words to write
- * @data: 16 bit word(s) to be written to the EEPROM
- *
- * Writes data to EEPROM at offset using SPI interface.
- *
- * If e1000e_update_nvm_checksum is not called after this function , the
- * EEPROM will most likely contain an invalid checksum.
- **/
-s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
- struct e1000_nvm_info *nvm = &hw->nvm;
- s32 ret_val;
- u16 widx = 0;
-
- /*
- * A check for invalid values: offset too large, too many words,
- * and not enough words.
- */
- if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
- (words == 0)) {
- e_dbg("nvm parameter(s) out of bounds\n");
- return -E1000_ERR_NVM;
- }
-
- ret_val = nvm->ops.acquire(hw);
- if (ret_val)
- return ret_val;
-
- while (widx < words) {
- u8 write_opcode = NVM_WRITE_OPCODE_SPI;
-
- ret_val = e1000_ready_nvm_eeprom(hw);
- if (ret_val) {
- nvm->ops.release(hw);
- return ret_val;
- }
-
- e1000_standby_nvm(hw);
-
- /* Send the WRITE ENABLE command (8 bit opcode) */
- e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
- nvm->opcode_bits);
-
- e1000_standby_nvm(hw);
-
- /*
- * Some SPI eeproms use the 8th address bit embedded in the
- * opcode
- */
- if ((nvm->address_bits == 8) && (offset >= 128))
- write_opcode |= NVM_A8_OPCODE_SPI;
-
- /* Send the Write command (8-bit opcode + addr) */
- e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
- e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
- nvm->address_bits);
-
- /* Loop to allow for up to whole page write of eeprom */
- while (widx < words) {
- u16 word_out = data[widx];
- word_out = (word_out >> 8) | (word_out << 8);
- e1000_shift_out_eec_bits(hw, word_out, 16);
- widx++;
-
- if ((((offset + widx) * 2) % nvm->page_size) == 0) {
- e1000_standby_nvm(hw);
- break;
- }
- }
- }
-
- usleep_range(10000, 20000);
- nvm->ops.release(hw);
- return 0;
-}
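
Two details of the SPI write path above are easy to miss: each 16-bit word is byte-swapped so the high byte goes out first, and a page write must be re-armed whenever the byte address crosses a page boundary. Below is a stand-alone sketch of just that arithmetic, assuming a 32-byte page size purely for illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint16_t page_size = 32;            /* assumed page size in bytes */
	const uint16_t data[] = { 0x1234, 0xABCD };
	const uint16_t offset = 14;               /* starting word offset */
	uint16_t widx;

	for (widx = 0; widx < 2; widx++) {
		uint16_t word_out = data[widx];

		/* The serial EEPROM expects the high byte first. */
		word_out = (uint16_t)((word_out >> 8) | (word_out << 8));
		printf("write 0x%04X at byte address %u\n",
		       (unsigned int)word_out,
		       (unsigned int)((offset + widx) * 2));

		/* A page write must stop when the next byte address lands on
		 * a page boundary; the driver breaks out of the inner widx
		 * loop and issues a fresh WRITE command. */
		if ((((offset + widx + 1) * 2) % page_size) == 0)
			printf("page boundary reached, re-issue WRITE\n");
	}
	return 0;
}
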
-
-/**
- * e1000_read_pba_string_generic - Read device part number
- * @hw: pointer to the HW structure
- * @pba_num: pointer to device part number
- * @pba_num_size: size of part number buffer
- *
- * Reads the product board assembly (PBA) number from the EEPROM and stores
- * the value in pba_num.
- **/
-s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
- u32 pba_num_size)
-{
- s32 ret_val;
- u16 nvm_data;
- u16 pba_ptr;
- u16 offset;
- u16 length;
-
- if (pba_num == NULL) {
- e_dbg("PBA string buffer was null\n");
- ret_val = E1000_ERR_INVALID_ARGUMENT;
- goto out;
- }
-
- ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- goto out;
- }
-
- ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- goto out;
- }
-
- /*
- * if nvm_data is not ptr guard the PBA must be in legacy format which
- * means pba_ptr is actually our second data word for the PBA number
- * and we can decode it into an ascii string
- */
- if (nvm_data != NVM_PBA_PTR_GUARD) {
- e_dbg("NVM PBA number is not stored as string\n");
-
- /* we will need 11 characters to store the PBA */
- if (pba_num_size < 11) {
- e_dbg("PBA string buffer too small\n");
- return E1000_ERR_NO_SPACE;
- }
-
- /* extract hex string from data and pba_ptr */
- pba_num[0] = (nvm_data >> 12) & 0xF;
- pba_num[1] = (nvm_data >> 8) & 0xF;
- pba_num[2] = (nvm_data >> 4) & 0xF;
- pba_num[3] = nvm_data & 0xF;
- pba_num[4] = (pba_ptr >> 12) & 0xF;
- pba_num[5] = (pba_ptr >> 8) & 0xF;
- pba_num[6] = '-';
- pba_num[7] = 0;
- pba_num[8] = (pba_ptr >> 4) & 0xF;
- pba_num[9] = pba_ptr & 0xF;
-
- /* put a null character on the end of our string */
- pba_num[10] = '\0';
-
- /* switch all the data but the '-' to hex char */
- for (offset = 0; offset < 10; offset++) {
- if (pba_num[offset] < 0xA)
- pba_num[offset] += '0';
- else if (pba_num[offset] < 0x10)
- pba_num[offset] += 'A' - 0xA;
- }
-
- goto out;
- }
-
- ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- goto out;
- }
-
- if (length == 0xFFFF || length == 0) {
- e_dbg("NVM PBA number section invalid length\n");
- ret_val = E1000_ERR_NVM_PBA_SECTION;
- goto out;
- }
- /* check if pba_num buffer is big enough */
- if (pba_num_size < (((u32)length * 2) - 1)) {
- e_dbg("PBA string buffer too small\n");
- ret_val = E1000_ERR_NO_SPACE;
- goto out;
- }
-
- /* trim pba length from start of string */
- pba_ptr++;
- length--;
-
- for (offset = 0; offset < length; offset++) {
- ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- goto out;
- }
- pba_num[offset * 2] = (u8)(nvm_data >> 8);
- pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
- }
- pba_num[offset * 2] = '\0';
-
-out:
- return ret_val;
-}
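
In the legacy (pre-string) PBA format decoded above, ten hex nibbles are packed into two EEPROM words, with a literal '-' and '0' at fixed positions in the output. Below is a user-space sketch of the same nibble-to-ASCII conversion; the two sample words are made up.

#include <stdio.h>
#include <stdint.h>

/* Decode the legacy two-word PBA format into the "XXXXXX-0XX" string,
 * following the nibble layout used in the hunk above. */
static void decode_legacy_pba(uint16_t word0, uint16_t word1, char pba[11])
{
	int i;

	pba[0] = (word0 >> 12) & 0xF;
	pba[1] = (word0 >> 8) & 0xF;
	pba[2] = (word0 >> 4) & 0xF;
	pba[3] = word0 & 0xF;
	pba[4] = (word1 >> 12) & 0xF;
	pba[5] = (word1 >> 8) & 0xF;
	pba[6] = '-';
	pba[7] = 0;
	pba[8] = (word1 >> 4) & 0xF;
	pba[9] = word1 & 0xF;
	pba[10] = '\0';

	/* Turn every raw nibble (everything except the '-') into a hex digit. */
	for (i = 0; i < 10; i++) {
		if (i == 6)
			continue;
		if (pba[i] < 0xA)
			pba[i] += '0';
		else if (pba[i] < 0x10)
			pba[i] += 'A' - 0xA;
	}
}

int main(void)
{
	char pba[11];

	decode_legacy_pba(0xE463, 0x12A0, pba);
	printf("%s\n", pba); /* prints "E46312-0A0" */
	return 0;
}
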
-
-/**
- * e1000_read_mac_addr_generic - Read device MAC address
- * @hw: pointer to the HW structure
- *
- * Reads the device MAC address from the EEPROM and stores the value.
- * Since devices with two ports use the same EEPROM, we increment the
- * last bit in the MAC address for the second port.
- **/
-s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
-{
- u32 rar_high;
- u32 rar_low;
- u16 i;
-
- rar_high = er32(RAH(0));
- rar_low = er32(RAL(0));
-
- for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
- hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
-
- for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
- hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
-
- for (i = 0; i < ETH_ALEN; i++)
- hw->mac.addr[i] = hw->mac.perm_addr[i];
-
- return 0;
-}
-
-/**
- * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
- * @hw: pointer to the HW structure
- *
- * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
- * and then verifies that the sum of the EEPROM is equal to 0xBABA.
- **/
-s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
-{
- s32 ret_val;
- u16 checksum = 0;
- u16 i, nvm_data;
-
- for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
- ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- checksum += nvm_data;
- }
-
- if (checksum != (u16) NVM_SUM) {
- e_dbg("NVM Checksum Invalid\n");
- return -E1000_ERR_NVM;
- }
-
- return 0;
-}
-
-/**
- * e1000e_update_nvm_checksum_generic - Update EEPROM checksum
- * @hw: pointer to the HW structure
- *
- * Updates the EEPROM checksum by reading/adding each word of the EEPROM
- * up to the checksum. Then calculates the EEPROM checksum and writes the
- * value to the EEPROM.
- **/
-s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
-{
- s32 ret_val;
- u16 checksum = 0;
- u16 i, nvm_data;
-
- for (i = 0; i < NVM_CHECKSUM_REG; i++) {
- ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
- if (ret_val) {
- e_dbg("NVM Read Error while updating checksum.\n");
- return ret_val;
- }
- checksum += nvm_data;
- }
- checksum = (u16) NVM_SUM - checksum;
- ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
- if (ret_val)
- e_dbg("NVM Write Error while updating checksum.\n");
-
- return ret_val;
-}
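
The two checksum routines above enforce one invariant: the 16-bit words from offset 0 through the checksum register must sum to 0xBABA (NVM_SUM). Below is a user-space sketch of the same update/validate pair over an in-memory array, assuming a 0x40-word checksummed region (the real boundary is the driver's NVM_CHECKSUM_REG).

#include <stdio.h>
#include <stdint.h>

#define NVM_WORDS 0x40   /* assumed region size; checksum word is the last */
#define NVM_SUM   0xBABA

/* Recompute the checksum word so the whole region sums to NVM_SUM. */
static void nvm_update_checksum(uint16_t *nvm)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < NVM_WORDS - 1; i++)
		checksum += nvm[i];
	nvm[NVM_WORDS - 1] = (uint16_t)(NVM_SUM - checksum);
}

/* Return 1 if the region, including the checksum word, sums to NVM_SUM. */
static int nvm_validate_checksum(const uint16_t *nvm)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < NVM_WORDS; i++)
		checksum += nvm[i];
	return checksum == NVM_SUM;
}

int main(void)
{
	uint16_t nvm[NVM_WORDS] = { 0x8086, 0x10D3, 0xC0DE };

	nvm_update_checksum(nvm);
	printf("checksum word 0x%04X, valid=%d\n",
	       (unsigned int)nvm[NVM_WORDS - 1], nvm_validate_checksum(nvm));
	return 0;
}
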
-
-/**
- * e1000e_reload_nvm - Reloads EEPROM
- * @hw: pointer to the HW structure
- *
- * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
- * extended control register.
- **/
-void e1000e_reload_nvm(struct e1000_hw *hw)
-{
- u32 ctrl_ext;
-
- udelay(10);
- ctrl_ext = er32(CTRL_EXT);
- ctrl_ext |= E1000_CTRL_EXT_EE_RST;
- ew32(CTRL_EXT, ctrl_ext);
- e1e_flush();
-}
-
-/**
- * e1000_calculate_checksum - Calculate checksum for buffer
- * @buffer: pointer to EEPROM
- * @length: size of EEPROM to calculate a checksum for
- *
- * Calculates the checksum for some buffer on a specified length. The
- * checksum calculated is returned.
- **/
-static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
-{
- u32 i;
- u8 sum = 0;
-
- if (!buffer)
- return 0;
-
- for (i = 0; i < length; i++)
- sum += buffer[i];
-
- return (u8) (0 - sum);
-}
-
-/**
- * e1000_mng_enable_host_if - Checks host interface is enabled
- * @hw: pointer to the HW structure
- *
- * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
- *
- * This function checks whether the HOST IF is enabled for command operation
- * and also checks whether the previous command is completed. It busy waits
- * in case of previous command is not completed.
- **/
-static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
-{
- u32 hicr;
- u8 i;
-
- if (!(hw->mac.arc_subsystem_valid)) {
- e_dbg("ARC subsystem not valid.\n");
- return -E1000_ERR_HOST_INTERFACE_COMMAND;
- }
-
- /* Check that the host interface is enabled. */
- hicr = er32(HICR);
- if ((hicr & E1000_HICR_EN) == 0) {
- e_dbg("E1000_HOST_EN bit disabled.\n");
- return -E1000_ERR_HOST_INTERFACE_COMMAND;
- }
- /* check the previous command is completed */
- for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
- hicr = er32(HICR);
- if (!(hicr & E1000_HICR_C))
- break;
- mdelay(1);
- }
-
- if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
- e_dbg("Previous command timeout failed .\n");
- return -E1000_ERR_HOST_INTERFACE_COMMAND;
- }
-
- return 0;
-}
-
-/**
- * e1000e_check_mng_mode_generic - check management mode
- * @hw: pointer to the HW structure
- *
- * Reads the firmware semaphore register and returns true (>0) if
- * manageability is enabled, else false (0).
- **/
-bool e1000e_check_mng_mode_generic(struct e1000_hw *hw)
-{
- u32 fwsm = er32(FWSM);
-
- return (fwsm & E1000_FWSM_MODE_MASK) ==
- (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
-}
-
-/**
- * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
- * @hw: pointer to the HW structure
- *
- * Enables packet filtering on transmit packets if manageability is enabled
- * and host interface is enabled.
- **/
-bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
-{
- struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
- u32 *buffer = (u32 *)&hw->mng_cookie;
- u32 offset;
- s32 ret_val, hdr_csum, csum;
- u8 i, len;
-
- hw->mac.tx_pkt_filtering = true;
-
- /* No manageability, no filtering */
- if (!e1000e_check_mng_mode(hw)) {
- hw->mac.tx_pkt_filtering = false;
- goto out;
- }
-
- /*
- * If we can't read from the host interface for whatever
- * reason, disable filtering.
- */
- ret_val = e1000_mng_enable_host_if(hw);
- if (ret_val) {
- hw->mac.tx_pkt_filtering = false;
- goto out;
- }
-
- /* Read in the header. Length and offset are in dwords. */
- len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
- offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
- for (i = 0; i < len; i++)
- *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i);
- hdr_csum = hdr->checksum;
- hdr->checksum = 0;
- csum = e1000_calculate_checksum((u8 *)hdr,
- E1000_MNG_DHCP_COOKIE_LENGTH);
- /*
- * If either the checksums or signature don't match, then
- * the cookie area isn't considered valid, in which case we
- * take the safe route of assuming Tx filtering is enabled.
- */
- if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
- hw->mac.tx_pkt_filtering = true;
- goto out;
- }
-
- /* Cookie area is valid, make the final check for filtering. */
- if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
- hw->mac.tx_pkt_filtering = false;
- goto out;
- }
-
-out:
- return hw->mac.tx_pkt_filtering;
-}
-
-/**
- * e1000_mng_write_cmd_header - Writes manageability command header
- * @hw: pointer to the HW structure
- * @hdr: pointer to the host interface command header
- *
- * Writes the command header after does the checksum calculation.
- **/
-static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
- struct e1000_host_mng_command_header *hdr)
-{
- u16 i, length = sizeof(struct e1000_host_mng_command_header);
-
- /* Write the whole command header structure with new checksum. */
-
- hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
-
- length >>= 2;
- /* Write the relevant command block into the ram area. */
- for (i = 0; i < length; i++) {
- E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i,
- *((u32 *) hdr + i));
- e1e_flush();
- }
-
- return 0;
-}
-
-/**
- * e1000_mng_host_if_write - Write to the manageability host interface
- * @hw: pointer to the HW structure
- * @buffer: pointer to the host interface buffer
- * @length: size of the buffer
- * @offset: location in the buffer to write to
- * @sum: sum of the data (not checksum)
- *
- * This function writes the buffer content at the offset given on the host if.
- * It also does alignment considerations to do the writes in most efficient
- * way. Also fills up the sum of the buffer in *buffer parameter.
- **/
-static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
- u16 length, u16 offset, u8 *sum)
-{
- u8 *tmp;
- u8 *bufptr = buffer;
- u32 data = 0;
- u16 remaining, i, j, prev_bytes;
-
- /* sum = only sum of the data and it is not checksum */
-
- if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
- return -E1000_ERR_PARAM;
-
- tmp = (u8 *)&data;
- prev_bytes = offset & 0x3;
- offset >>= 2;
-
- if (prev_bytes) {
- data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
- for (j = prev_bytes; j < sizeof(u32); j++) {
- *(tmp + j) = *bufptr++;
- *sum += *(tmp + j);
- }
- E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
- length -= j - prev_bytes;
- offset++;
- }
-
- remaining = length & 0x3;
- length -= remaining;
-
- /* Calculate length in DWORDs */
- length >>= 2;
-
- /*
- * The device driver writes the relevant command block into the
- * ram area.
- */
- for (i = 0; i < length; i++) {
- for (j = 0; j < sizeof(u32); j++) {
- *(tmp + j) = *bufptr++;
- *sum += *(tmp + j);
- }
-
- E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
- }
- if (remaining) {
- for (j = 0; j < sizeof(u32); j++) {
- if (j < remaining)
- *(tmp + j) = *bufptr++;
- else
- *(tmp + j) = 0;
-
- *sum += *(tmp + j);
- }
- E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
- }
-
- return 0;
-}
-
-/**
- * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
- * @hw: pointer to the HW structure
- * @buffer: pointer to the host interface
- * @length: size of the buffer
- *
- * Writes the DHCP information to the host interface.
- **/
-s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
-{
- struct e1000_host_mng_command_header hdr;
- s32 ret_val;
- u32 hicr;
-
- hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
- hdr.command_length = length;
- hdr.reserved1 = 0;
- hdr.reserved2 = 0;
- hdr.checksum = 0;
-
- /* Enable the host interface */
- ret_val = e1000_mng_enable_host_if(hw);
- if (ret_val)
- return ret_val;
-
- /* Populate the host interface with the contents of "buffer". */
- ret_val = e1000_mng_host_if_write(hw, buffer, length,
- sizeof(hdr), &(hdr.checksum));
- if (ret_val)
- return ret_val;
-
- /* Write the manageability command header */
- ret_val = e1000_mng_write_cmd_header(hw, &hdr);
- if (ret_val)
- return ret_val;
-
- /* Tell the ARC a new command is pending. */
- hicr = er32(HICR);
- ew32(HICR, hicr | E1000_HICR_C);
-
- return 0;
-}
-
-/**
- * e1000e_enable_mng_pass_thru - Check if management passthrough is needed
- * @hw: pointer to the HW structure
- *
- * Verifies the hardware needs to leave interface enabled so that frames can
- * be directed to and from the management interface.
- **/
-bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
-{
- u32 manc;
- u32 fwsm, factps;
- bool ret_val = false;
-
- manc = er32(MANC);
-
- if (!(manc & E1000_MANC_RCV_TCO_EN))
- goto out;
-
- if (hw->mac.has_fwsm) {
- fwsm = er32(FWSM);
- factps = er32(FACTPS);
-
- if (!(factps & E1000_FACTPS_MNGCG) &&
- ((fwsm & E1000_FWSM_MODE_MASK) ==
- (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
- ret_val = true;
- goto out;
- }
- } else if ((hw->mac.type == e1000_82574) ||
- (hw->mac.type == e1000_82583)) {
- u16 data;
-
- factps = er32(FACTPS);
- e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
-
- if (!(factps & E1000_FACTPS_MNGCG) &&
- ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
- (e1000_mng_mode_pt << 13))) {
- ret_val = true;
- goto out;
- }
- } else if ((manc & E1000_MANC_SMBUS_EN) &&
- !(manc & E1000_MANC_ASF_EN)) {
- ret_val = true;
- goto out;
- }
-
-out:
- return ret_val;
}
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
new file mode 100644
index 000000000000..473f8e711510
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -0,0 +1,367 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+enum e1000_mng_mode {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG 0x20000000
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE 0x544D4149
+
+/**
+ * e1000_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to the buffer to checksum
+ * @length: number of bytes in the buffer
+ *
+ * Calculates the checksum over the given buffer for the specified length
+ * and returns the calculated checksum.
+ **/
+static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
+{
+ u32 i;
+ u8 sum = 0;
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8)(0 - sum);
+}
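
The helper above returns the two's complement of the byte sum, so writing the returned value into the buffer's checksum slot makes the whole buffer sum to zero modulo 256 — the property the cookie and command-header code later in this file relies on. A quick stand-alone check of that property follows; the sample cookie bytes are made up.

#include <stdio.h>
#include <stdint.h>

static uint8_t calculate_checksum(const uint8_t *buffer, uint32_t length)
{
	uint8_t sum = 0;
	uint32_t i;

	for (i = 0; i < length; i++)
		sum += buffer[i];
	return (uint8_t)(0 - sum);
}

int main(void)
{
	uint8_t cookie[8] = { 0x49, 0x41, 0x4D, 0x54, 0x01, 0x02, 0x03, 0x00 };
	uint8_t total = 0;
	int i;

	/* Store the checksum of the first 7 bytes in the last slot. */
	cookie[7] = calculate_checksum(cookie, 7);

	for (i = 0; i < 8; i++)
		total += cookie[i];
	printf("checksum 0x%02X, total 0x%02X\n",
	       (unsigned int)cookie[7], (unsigned int)total); /* total is 0x00 */
	return 0;
}
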
+
+/**
+ * e1000_mng_enable_host_if - Checks host interface is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND.
+ *
+ * This function checks whether the HOST IF is enabled for command operation
+ * and also checks whether the previous command is completed. It busy-waits
+ * if the previous command is not yet completed.
+ **/
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
+{
+ u32 hicr;
+ u8 i;
+
+ if (!hw->mac.arc_subsystem_valid) {
+ e_dbg("ARC subsystem not valid.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = er32(HICR);
+ if ((hicr & E1000_HICR_EN) == 0) {
+ e_dbg("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+ /* check the previous command is completed */
+ for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+ hicr = er32(HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ mdelay(1);
+ }
+
+ if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+ e_dbg("Previous command timeout failed .\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_check_mng_mode_generic - Generic check for management mode
+ * @hw: pointer to the HW structure
+ *
+ * Reads the firmware semaphore register and returns true (>0) if
+ * manageability is enabled, else false (0).
+ **/
+bool e1000e_check_mng_mode_generic(struct e1000_hw *hw)
+{
+ u32 fwsm = er32(FWSM);
+
+ return (fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
+}
+
+/**
+ * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
+ * @hw: pointer to the HW structure
+ *
+ * Enables packet filtering on transmit packets if manageability is enabled
+ * and host interface is enabled.
+ **/
+bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+ struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
+ u32 *buffer = (u32 *)&hw->mng_cookie;
+ u32 offset;
+ s32 ret_val, hdr_csum, csum;
+ u8 i, len;
+
+ hw->mac.tx_pkt_filtering = true;
+
+ /* No manageability, no filtering */
+ if (!hw->mac.ops.check_mng_mode(hw)) {
+ hw->mac.tx_pkt_filtering = false;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /*
+ * If we can't read from the host interface for whatever
+ * reason, disable filtering.
+ */
+ ret_val = e1000_mng_enable_host_if(hw);
+ if (ret_val) {
+ hw->mac.tx_pkt_filtering = false;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* Read in the header. Length and offset are in dwords. */
+ len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
+ offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
+ for (i = 0; i < len; i++)
+ *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF,
+ offset + i);
+ hdr_csum = hdr->checksum;
+ hdr->checksum = 0;
+ csum = e1000_calculate_checksum((u8 *)hdr,
+ E1000_MNG_DHCP_COOKIE_LENGTH);
+ /*
+ * If either the checksums or signature don't match, then
+ * the cookie area isn't considered valid, in which case we
+ * take the safe route of assuming Tx filtering is enabled.
+ */
+ if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
+ hw->mac.tx_pkt_filtering = true;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* Cookie area is valid, make the final check for filtering. */
+ if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
+ hw->mac.tx_pkt_filtering = false;
+
+ return hw->mac.tx_pkt_filtering;
+}
+
+/**
+ * e1000_mng_write_cmd_header - Writes manageability command header
+ * @hw: pointer to the HW structure
+ * @hdr: pointer to the host interface command header
+ *
+ * Writes the command header after performing the checksum calculation.
+ **/
+static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr)
+{
+ u16 i, length = sizeof(struct e1000_host_mng_command_header);
+
+ /* Write the whole command header structure with new checksum. */
+
+ hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
+
+ length >>= 2;
+ /* Write the relevant command block into the ram area. */
+ for (i = 0; i < length; i++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, *((u32 *)hdr + i));
+ e1e_flush();
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_mng_host_if_write - Write to the manageability host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface buffer
+ * @length: size of the buffer
+ * @offset: location in the buffer to write to
+ * @sum: sum of the data (not checksum)
+ *
+ * This function writes the buffer content at the given offset on the host
+ * interface. It handles alignment so that the writes are done in the most
+ * efficient way, and it accumulates the byte sum of the buffer in the *sum
+ * parameter.
+ **/
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
+ u16 length, u16 offset, u8 *sum)
+{
+ u8 *tmp;
+ u8 *bufptr = buffer;
+ u32 data = 0;
+ u16 remaining, i, j, prev_bytes;
+
+ /* sum is only the sum of the data; it is not a checksum */
+
+ if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
+ return -E1000_ERR_PARAM;
+
+ tmp = (u8 *)&data;
+ prev_bytes = offset & 0x3;
+ offset >>= 2;
+
+ if (prev_bytes) {
+ data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
+ for (j = prev_bytes; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
+ length -= j - prev_bytes;
+ offset++;
+ }
+
+ remaining = length & 0x3;
+ length -= remaining;
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /*
+ * The device driver writes the relevant command block into the
+ * ram area.
+ */
+ for (i = 0; i < length; i++) {
+ for (j = 0; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+
+ E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
+ }
+ if (remaining) {
+ for (j = 0; j < sizeof(u32); j++) {
+ if (j < remaining)
+ *(tmp + j) = *bufptr++;
+ else
+ *(tmp + j) = 0;
+
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
+ }
+
+ return 0;
+}
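
e1000_mng_host_if_write() above packs an arbitrary byte buffer into 32-bit host-interface words, zero-pads the trailing partial word and keeps a running byte sum for the caller. Below is a rough user-space sketch of that packing for the dword-aligned case; the leading-partial-word path is ignored and the register writes are replaced with printf.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void pack_words(const uint8_t *buf, uint16_t length, uint8_t *sum)
{
	uint32_t data;
	uint16_t full = length / 4, rem = length % 4, i, j;
	const uint8_t *p = buf;

	/* Whole dwords: copy four bytes at a time and add them to the sum. */
	for (i = 0; i < full; i++) {
		memcpy(&data, p, 4);
		for (j = 0; j < 4; j++)
			*sum += p[j];
		printf("word %u: 0x%08X\n", (unsigned int)i, (unsigned int)data);
		p += 4;
	}
	/* Trailing bytes: zero-pad the final dword, as the driver does. */
	if (rem) {
		data = 0;
		memcpy(&data, p, rem);
		for (j = 0; j < rem; j++)
			*sum += p[j];
		printf("word %u: 0x%08X (padded)\n",
		       (unsigned int)i, (unsigned int)data);
	}
}

int main(void)
{
	const uint8_t payload[] = "dhcp-cookie";
	uint8_t sum = 0;

	pack_words(payload, (uint16_t)(sizeof(payload) - 1), &sum);
	printf("running byte sum 0x%02X\n", (unsigned int)sum);
	return 0;
}
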
+
+/**
+ * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface
+ * @length: size of the buffer
+ *
+ * Writes the DHCP information to the host interface.
+ **/
+s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+ struct e1000_host_mng_command_header hdr;
+ s32 ret_val;
+ u32 hicr;
+
+ hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+ hdr.command_length = length;
+ hdr.reserved1 = 0;
+ hdr.reserved2 = 0;
+ hdr.checksum = 0;
+
+ /* Enable the host interface */
+ ret_val = e1000_mng_enable_host_if(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Populate the host interface with the contents of "buffer". */
+ ret_val = e1000_mng_host_if_write(hw, buffer, length,
+ sizeof(hdr), &(hdr.checksum));
+ if (ret_val)
+ return ret_val;
+
+ /* Write the manageability command header */
+ ret_val = e1000_mng_write_cmd_header(hw, &hdr);
+ if (ret_val)
+ return ret_val;
+
+ /* Tell the ARC a new command is pending. */
+ hicr = er32(HICR);
+ ew32(HICR, hicr | E1000_HICR_C);
+
+ return 0;
+}
+
+/**
+ * e1000e_enable_mng_pass_thru - Check if management passthrough is needed
+ * @hw: pointer to the HW structure
+ *
+ * Verifies the hardware needs to leave interface enabled so that frames can
+ * be directed to and from the management interface.
+ **/
+bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+ u32 manc;
+ u32 fwsm, factps;
+
+ manc = er32(MANC);
+
+ if (!(manc & E1000_MANC_RCV_TCO_EN))
+ return false;
+
+ if (hw->mac.has_fwsm) {
+ fwsm = er32(FWSM);
+ factps = er32(FACTPS);
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)))
+ return true;
+ } else if ((hw->mac.type == e1000_82574) ||
+ (hw->mac.type == e1000_82583)) {
+ u16 data;
+
+ factps = er32(FACTPS);
+ e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
+ (e1000_mng_mode_pt << 13)))
+ return true;
+ } else if ((manc & E1000_MANC_SMBUS_EN) &&
+ !(manc & E1000_MANC_ASF_EN)) {
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3911401ed65d..a9a4ea2c616e 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -56,7 +56,7 @@
#define DRV_EXTRAVERSION "-k"
-#define DRV_VERSION "1.5.1" DRV_EXTRAVERSION
+#define DRV_VERSION "1.9.5" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -137,7 +137,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
{E1000_TDFPC, "TDFPC"},
/* List Terminator */
- {}
+ {0, NULL}
};
/*
@@ -183,18 +183,18 @@ static void e1000e_dump(struct e1000_adapter *adapter)
struct e1000_ring *tx_ring = adapter->tx_ring;
struct e1000_tx_desc *tx_desc;
struct my_u0 {
- u64 a;
- u64 b;
+ __le64 a;
+ __le64 b;
} *u0;
struct e1000_buffer *buffer_info;
struct e1000_ring *rx_ring = adapter->rx_ring;
union e1000_rx_desc_packet_split *rx_desc_ps;
union e1000_rx_desc_extended *rx_desc;
struct my_u1 {
- u64 a;
- u64 b;
- u64 c;
- u64 d;
+ __le64 a;
+ __le64 b;
+ __le64 c;
+ __le64 d;
} *u1;
u32 staterr;
int i = 0;
@@ -221,7 +221,7 @@ static void e1000e_dump(struct e1000_adapter *adapter)
/* Print Tx Ring Summary */
if (!netdev || !netif_running(netdev))
- goto exit;
+ return;
dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
@@ -308,7 +308,7 @@ rx_ring_summary:
/* Print Rx Ring */
if (!netif_msg_rx_status(adapter))
- goto exit;
+ return;
dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
switch (adapter->rx_ps_pages) {
@@ -449,9 +449,6 @@ rx_ring_summary:
}
}
}
-
-exit:
- return;
}
/**
@@ -487,22 +484,27 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
/**
* e1000_rx_checksum - Receive Checksum Offload
- * @adapter: board private structure
- * @status_err: receive descriptor status and error fields
- * @csum: receive descriptor csum field
- * @sk_buff: socket buffer with received data
+ * @adapter: board private structure
+ * @status_err: receive descriptor status and error fields
+ * @csum: receive descriptor csum field
+ * @sk_buff: socket buffer with received data
**/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
- u32 csum, struct sk_buff *skb)
+ __le16 csum, struct sk_buff *skb)
{
u16 status = (u16)status_err;
u8 errors = (u8)(status_err >> 24);
skb_checksum_none_assert(skb);
+ /* Rx checksum disabled */
+ if (!(adapter->netdev->features & NETIF_F_RXCSUM))
+ return;
+
/* Ignore Checksum bit is set */
if (status & E1000_RXD_STAT_IXSM)
return;
+
/* TCP/UDP checksum error bit is set */
if (errors & E1000_RXD_ERR_TCPE) {
/* let the stack verify checksum errors */
@@ -524,7 +526,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
* Hardware complements the payload checksum, so we undo it
* and then put the value in host order for further stack use.
*/
- __sum16 sum = (__force __sum16)htons(csum);
+ __sum16 sum = (__force __sum16)swab16((__force u16)csum);
skb->csum = csum_unfold(~sum);
skb->ip_summed = CHECKSUM_COMPLETE;
}
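
The CHECKSUM_COMPLETE hunk above now byte-swaps the raw little-endian csum field directly instead of converting it to CPU order and then calling htons(); the two forms give the same result on either endianness, which can be checked in user space. In the sketch below, swab16 and the little-endian load are reimplemented locally, and the C library's htons() stands in for the kernel's.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static uint16_t my_swab16(uint16_t x)
{
	return (uint16_t)((x >> 8) | (x << 8));
}

/* Read two bytes as a little-endian 16-bit value, independent of host order. */
static uint16_t le16_load(const uint8_t b[2])
{
	return (uint16_t)(b[0] | (b[1] << 8));
}

int main(void)
{
	/* The csum bytes exactly as the NIC wrote them into the descriptor. */
	const uint8_t raw[2] = { 0x34, 0x12 };
	uint16_t as_stored;

	memcpy(&as_stored, raw, sizeof(as_stored));

	uint16_t old_way = htons(le16_load(raw)); /* le16_to_cpu() then htons() */
	uint16_t new_way = my_swab16(as_stored);  /* swab16() on the raw field  */

	printf("old 0x%04X new 0x%04X\n",
	       (unsigned int)old_way, (unsigned int)new_way);
	return 0;
}
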
@@ -545,7 +547,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
* which has bit 24 set while ME is accessing Host CSR registers, wait
* if it is set and try again a number of times.
**/
-static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail,
+static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, void __iomem *tail,
unsigned int i)
{
unsigned int j = 0;
@@ -562,12 +564,12 @@ static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail,
return 0;
}
-static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
+static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
- u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail);
+ struct e1000_adapter *adapter = rx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
- if (e1000e_update_tail_wa(hw, tail, i)) {
+ if (e1000e_update_tail_wa(hw, rx_ring->tail, i)) {
u32 rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
e_err("ME firmware caused invalid RDT - resetting\n");
@@ -575,12 +577,12 @@ static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
}
}
-static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
+static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
- u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail);
+ struct e1000_adapter *adapter = tx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
- if (e1000e_update_tail_wa(hw, tail, i)) {
+ if (e1000e_update_tail_wa(hw, tx_ring->tail, i)) {
u32 tctl = er32(TCTL);
ew32(TCTL, tctl & ~E1000_TCTL_EN);
e_err("ME firmware caused invalid TDT - resetting\n");
@@ -590,14 +592,14 @@ static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
/**
* e1000_alloc_rx_buffers - Replace used receive buffers
- * @adapter: address of board private structure
+ * @rx_ring: Rx descriptor ring
**/
-static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
int cleaned_count, gfp_t gfp)
{
+ struct e1000_adapter *adapter = rx_ring->adapter;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- struct e1000_ring *rx_ring = adapter->rx_ring;
union e1000_rx_desc_extended *rx_desc;
struct e1000_buffer *buffer_info;
struct sk_buff *skb;
@@ -644,9 +646,9 @@ map_skb:
*/
wmb();
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- e1000e_update_rdt_wa(adapter, i);
+ e1000e_update_rdt_wa(rx_ring, i);
else
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ writel(i, rx_ring->tail);
}
i++;
if (i == rx_ring->count)
@@ -659,15 +661,15 @@ map_skb:
/**
* e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: Rx descriptor ring
**/
-static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
+static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
int cleaned_count, gfp_t gfp)
{
+ struct e1000_adapter *adapter = rx_ring->adapter;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union e1000_rx_desc_packet_split *rx_desc;
- struct e1000_ring *rx_ring = adapter->rx_ring;
struct e1000_buffer *buffer_info;
struct e1000_ps_page *ps_page;
struct sk_buff *skb;
@@ -747,10 +749,9 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
*/
wmb();
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- e1000e_update_rdt_wa(adapter, i << 1);
+ e1000e_update_rdt_wa(rx_ring, i << 1);
else
- writel(i << 1,
- adapter->hw.hw_addr + rx_ring->tail);
+ writel(i << 1, rx_ring->tail);
}
i++;
@@ -765,17 +766,17 @@ no_buffers:
/**
* e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
- * @adapter: address of board private structure
+ * @rx_ring: Rx descriptor ring
* @cleaned_count: number of buffers to allocate this pass
**/
-static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
int cleaned_count, gfp_t gfp)
{
+ struct e1000_adapter *adapter = rx_ring->adapter;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union e1000_rx_desc_extended *rx_desc;
- struct e1000_ring *rx_ring = adapter->rx_ring;
struct e1000_buffer *buffer_info;
struct sk_buff *skb;
unsigned int i;
@@ -834,26 +835,33 @@ check_page:
* such as IA-64). */
wmb();
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- e1000e_update_rdt_wa(adapter, i);
+ e1000e_update_rdt_wa(rx_ring, i);
else
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ writel(i, rx_ring->tail);
}
}
+static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
+ struct sk_buff *skb)
+{
+ if (netdev->features & NETIF_F_RXHASH)
+ skb->rxhash = le32_to_cpu(rss);
+}
+
/**
- * e1000_clean_rx_irq - Send received data up the network stack; legacy
- * @adapter: board private structure
+ * e1000_clean_rx_irq - Send received data up the network stack
+ * @rx_ring: Rx descriptor ring
*
* the return value indicates whether actual cleaning was done, there
* is no guarantee that everything was cleaned
**/
-static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
- int *work_done, int work_to_do)
+static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
+ int work_to_do)
{
+ struct e1000_adapter *adapter = rx_ring->adapter;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
- struct e1000_ring *rx_ring = adapter->rx_ring;
union e1000_rx_desc_extended *rx_desc, *next_rxd;
struct e1000_buffer *buffer_info, *next_buffer;
u32 length, staterr;
@@ -918,15 +926,24 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
goto next_desc;
}
- if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
+ if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
+ !(netdev->features & NETIF_F_RXALL))) {
/* recycle */
buffer_info->skb = skb;
goto next_desc;
}
/* adjust length to remove Ethernet CRC */
- if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
- length -= 4;
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
+ /* If configured to store CRC, don't subtract FCS,
+ * but keep the FCS bytes out of the total_rx_bytes
+ * counter
+ */
+ if (netdev->features & NETIF_F_RXFCS)
+ total_rx_bytes -= 4;
+ else
+ length -= 4;
+ }
total_rx_bytes += length;
total_rx_packets++;
@@ -957,8 +974,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
/* Receive Checksum Offload */
e1000_rx_checksum(adapter, staterr,
- le16_to_cpu(rx_desc->wb.lower.hi_dword.
- csum_ip.csum), skb);
+ rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+
+ e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
e1000_receive_skb(adapter, netdev, skb, staterr,
rx_desc->wb.upper.vlan);
@@ -968,7 +986,7 @@ next_desc:
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
- adapter->alloc_rx_buf(adapter, cleaned_count,
+ adapter->alloc_rx_buf(rx_ring, cleaned_count,
GFP_ATOMIC);
cleaned_count = 0;
}
@@ -983,16 +1001,18 @@ next_desc:
cleaned_count = e1000_desc_unused(rx_ring);
if (cleaned_count)
- adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
+ adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
return cleaned;
}
-static void e1000_put_txbuf(struct e1000_adapter *adapter,
- struct e1000_buffer *buffer_info)
+static void e1000_put_txbuf(struct e1000_ring *tx_ring,
+ struct e1000_buffer *buffer_info)
{
+ struct e1000_adapter *adapter = tx_ring->adapter;
+
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
@@ -1063,8 +1083,8 @@ static void e1000_print_hw_hang(struct work_struct *work)
"PHY 1000BASE-T Status <%x>\n"
"PHY Extended Status <%x>\n"
"PCI Status <%x>\n",
- readl(adapter->hw.hw_addr + tx_ring->head),
- readl(adapter->hw.hw_addr + tx_ring->tail),
+ readl(tx_ring->head),
+ readl(tx_ring->tail),
tx_ring->next_to_use,
tx_ring->next_to_clean,
tx_ring->buffer_info[eop].time_stamp,
@@ -1080,16 +1100,16 @@ static void e1000_print_hw_hang(struct work_struct *work)
/**
* e1000_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring
*
* the return value indicates whether actual cleaning was done, there
* is no guarantee that everything was cleaned
**/
-static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
+static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
{
+ struct e1000_adapter *adapter = tx_ring->adapter;
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
- struct e1000_ring *tx_ring = adapter->tx_ring;
struct e1000_tx_desc *tx_desc, *eop_desc;
struct e1000_buffer *buffer_info;
unsigned int i, eop;
@@ -1119,7 +1139,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
}
}
- e1000_put_txbuf(adapter, buffer_info);
+ e1000_put_txbuf(tx_ring, buffer_info);
tx_desc->upper.data = 0;
i++;
@@ -1173,19 +1193,19 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
/**
* e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
- * @adapter: board private structure
+ * @rx_ring: Rx descriptor ring
*
* the return value indicates whether actual cleaning was done, there
* is no guarantee that everything was cleaned
**/
-static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
- int *work_done, int work_to_do)
+static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
+ int work_to_do)
{
+ struct e1000_adapter *adapter = rx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- struct e1000_ring *rx_ring = adapter->rx_ring;
struct e1000_buffer *buffer_info, *next_buffer;
struct e1000_ps_page *ps_page;
struct sk_buff *skb;
@@ -1236,7 +1256,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
goto next_desc;
}
- if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
+ if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
+ !(netdev->features & NETIF_F_RXALL))) {
dev_kfree_skb_irq(skb);
goto next_desc;
}
@@ -1253,43 +1274,51 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
skb_put(skb, length);
{
- /*
- * this looks ugly, but it seems compiler issues make it
- * more efficient than reusing j
- */
- int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
-
- /*
- * page alloc/put takes too long and effects small packet
- * throughput, so unsplit small packets and save the alloc/put
- * only valid in softirq (napi) context to call kmap_*
- */
- if (l1 && (l1 <= copybreak) &&
- ((length + l1) <= adapter->rx_ps_bsize0)) {
- u8 *vaddr;
-
- ps_page = &buffer_info->ps_pages[0];
+ /*
+ * this looks ugly, but it seems compiler issues make
+ * it more efficient than reusing j
+ */
+ int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
/*
- * there is no documentation about how to call
- * kmap_atomic, so we can't hold the mapping
- * very long
+ * page alloc/put takes too long and effects small
+ * packet throughput, so unsplit small packets and
+ * save the alloc/put only valid in softirq (napi)
+ * context to call kmap_*
*/
- dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
- vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
- memcpy(skb_tail_pointer(skb), vaddr, l1);
- kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
- dma_sync_single_for_device(&pdev->dev, ps_page->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
-
- /* remove the CRC */
- if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
- l1 -= 4;
-
- skb_put(skb, l1);
- goto copydone;
- } /* if */
+ if (l1 && (l1 <= copybreak) &&
+ ((length + l1) <= adapter->rx_ps_bsize0)) {
+ u8 *vaddr;
+
+ ps_page = &buffer_info->ps_pages[0];
+
+ /*
+ * there is no documentation about how to call
+ * kmap_atomic, so we can't hold the mapping
+ * very long
+ */
+ dma_sync_single_for_cpu(&pdev->dev,
+ ps_page->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ vaddr = kmap_atomic(ps_page->page,
+ KM_SKB_DATA_SOFTIRQ);
+ memcpy(skb_tail_pointer(skb), vaddr, l1);
+ kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+ dma_sync_single_for_device(&pdev->dev,
+ ps_page->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+
+ /* remove the CRC */
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
+ if (!(netdev->features & NETIF_F_RXFCS))
+ l1 -= 4;
+ }
+
+ skb_put(skb, l1);
+ goto copydone;
+ } /* if */
}
for (j = 0; j < PS_PAGE_BUFFERS; j++) {
@@ -1311,15 +1340,19 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
/* strip the ethernet crc, problem is we're using pages now so
* this whole operation can get a little cpu intensive
*/
- if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
- pskb_trim(skb, skb->len - 4);
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
+ if (!(netdev->features & NETIF_F_RXFCS))
+ pskb_trim(skb, skb->len - 4);
+ }
copydone:
total_rx_bytes += skb->len;
total_rx_packets++;
- e1000_rx_checksum(adapter, staterr, le16_to_cpu(
- rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
+ e1000_rx_checksum(adapter, staterr,
+ rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+
+ e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
if (rx_desc->wb.upper.header_status &
cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
@@ -1334,7 +1367,7 @@ next_desc:
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
- adapter->alloc_rx_buf(adapter, cleaned_count,
+ adapter->alloc_rx_buf(rx_ring, cleaned_count,
GFP_ATOMIC);
cleaned_count = 0;
}
@@ -1349,7 +1382,7 @@ next_desc:
cleaned_count = e1000_desc_unused(rx_ring);
if (cleaned_count)
- adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
+ adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
@@ -1375,13 +1408,12 @@ static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
* the return value indicates whether actual cleaning was done, there
* is no guarantee that everything was cleaned
**/
-
-static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
- int *work_done, int work_to_do)
+static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
+ int work_to_do)
{
+ struct e1000_adapter *adapter = rx_ring->adapter;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- struct e1000_ring *rx_ring = adapter->rx_ring;
union e1000_rx_desc_extended *rx_desc, *next_rxd;
struct e1000_buffer *buffer_info, *next_buffer;
u32 length, staterr;
@@ -1424,7 +1456,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* errors is only valid for DD + EOP descriptors */
if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
- (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK))) {
+ ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
+ !(netdev->features & NETIF_F_RXALL)))) {
/* recycle both page and skb */
buffer_info->skb = skb;
/* an error means any chain goes out the window too */
@@ -1491,8 +1524,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* Receive Checksum Offload XXX recompute due to CRC strip? */
e1000_rx_checksum(adapter, staterr,
- le16_to_cpu(rx_desc->wb.lower.hi_dword.
- csum_ip.csum), skb);
+ rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+
+ e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -1513,7 +1547,7 @@ next_desc:
/* return some buffers to hardware, one at a time is too slow */
if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
- adapter->alloc_rx_buf(adapter, cleaned_count,
+ adapter->alloc_rx_buf(rx_ring, cleaned_count,
GFP_ATOMIC);
cleaned_count = 0;
}
@@ -1528,7 +1562,7 @@ next_desc:
cleaned_count = e1000_desc_unused(rx_ring);
if (cleaned_count)
- adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
+ adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
@@ -1537,11 +1571,11 @@ next_desc:
/**
* e1000_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
+ * @rx_ring: Rx descriptor ring
**/
-static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
+static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
{
- struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_adapter *adapter = rx_ring->adapter;
struct e1000_buffer *buffer_info;
struct e1000_ps_page *ps_page;
struct pci_dev *pdev = adapter->pdev;
@@ -1601,8 +1635,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
rx_ring->next_to_use = 0;
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
- writel(0, adapter->hw.hw_addr + rx_ring->head);
- writel(0, adapter->hw.hw_addr + rx_ring->tail);
+ writel(0, rx_ring->head);
+ writel(0, rx_ring->tail);
}
static void e1000e_downshift_workaround(struct work_struct *work)
@@ -1633,7 +1667,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
*/
if (icr & E1000_ICR_LSC) {
- hw->mac.get_link_status = 1;
+ hw->mac.get_link_status = true;
/*
* ICH8 workaround-- Call gig speed drop workaround on cable
* disconnect (LSC) before accessing any PHY registers
@@ -1699,7 +1733,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
*/
if (icr & E1000_ICR_LSC) {
- hw->mac.get_link_status = 1;
+ hw->mac.get_link_status = true;
/*
* ICH8 workaround-- Call gig speed drop workaround on cable
* disconnect (LSC) before accessing any PHY registers
@@ -1756,7 +1790,7 @@ static irqreturn_t e1000_msix_other(int irq, void *data)
if (icr & E1000_ICR_OTHER) {
if (!(icr & E1000_ICR_LSC))
goto no_link_interrupt;
- hw->mac.get_link_status = 1;
+ hw->mac.get_link_status = true;
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
@@ -1781,7 +1815,7 @@ static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
- if (!e1000_clean_tx_irq(adapter))
+ if (!e1000_clean_tx_irq(tx_ring))
/* Ring was not completely cleaned, so fire another interrupt */
ew32(ICS, tx_ring->ims_val);
@@ -1792,14 +1826,15 @@ static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_ring *rx_ring = adapter->rx_ring;
/* Write the ITR value calculated at the end of the
* previous interrupt.
*/
- if (adapter->rx_ring->set_itr) {
- writel(1000000000 / (adapter->rx_ring->itr_val * 256),
- adapter->hw.hw_addr + adapter->rx_ring->itr_register);
- adapter->rx_ring->set_itr = 0;
+ if (rx_ring->set_itr) {
+ writel(1000000000 / (rx_ring->itr_val * 256),
+ rx_ring->itr_register);
+ rx_ring->set_itr = 0;
}
if (napi_schedule_prep(&adapter->napi)) {
@@ -1839,9 +1874,9 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
adapter->eiac_mask |= rx_ring->ims_val;
if (rx_ring->itr_val)
writel(1000000000 / (rx_ring->itr_val * 256),
- hw->hw_addr + rx_ring->itr_register);
+ rx_ring->itr_register);
else
- writel(1, hw->hw_addr + rx_ring->itr_register);
+ writel(1, rx_ring->itr_register);
ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
/* Configure Tx vector */
@@ -1849,9 +1884,9 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
vector++;
if (tx_ring->itr_val)
writel(1000000000 / (tx_ring->itr_val * 256),
- hw->hw_addr + tx_ring->itr_register);
+ tx_ring->itr_register);
else
- writel(1, hw->hw_addr + tx_ring->itr_register);
+ writel(1, tx_ring->itr_register);
adapter->eiac_mask |= tx_ring->ims_val;
ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
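The value written to the ITR registers above, 1000000000 / (itr_val * 256), converts a target interrupt rate in interrupts per second into the register's 256 ns interval units, with a literal 1 written when itr_val is zero. A minimal user-space sketch of that conversion — itr_to_reg is an illustrative helper, not a driver function:

#include <stdio.h>
#include <stdint.h>

/* Convert a target interrupt rate (interrupts/sec) into the value the
 * driver writes to the ring's ITR register: the inter-interrupt gap in
 * nanoseconds divided by the 256 ns register granularity. */
static uint32_t itr_to_reg(uint32_t itr_val)
{
    if (itr_val == 0)
        return 1;               /* matches the writel(1, ...) fallback above */
    return 1000000000u / (itr_val * 256u);
}

int main(void)
{
    uint32_t rates[] = { 100, 8000, 20000, 100000 };
    for (unsigned i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
        printf("%6u ints/sec -> register value %u\n",
               (unsigned)rates[i], (unsigned)itr_to_reg(rates[i]));
    return 0;
}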
@@ -1965,8 +2000,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
e1000_intr_msix_rx, 0, adapter->rx_ring->name,
netdev);
if (err)
- goto out;
- adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
+ return err;
+ adapter->rx_ring->itr_register = adapter->hw.hw_addr +
+ E1000_EITR_82574(vector);
adapter->rx_ring->itr_val = adapter->itr;
vector++;
@@ -1980,20 +2016,20 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
e1000_intr_msix_tx, 0, adapter->tx_ring->name,
netdev);
if (err)
- goto out;
- adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
+ return err;
+ adapter->tx_ring->itr_register = adapter->hw.hw_addr +
+ E1000_EITR_82574(vector);
adapter->tx_ring->itr_val = adapter->itr;
vector++;
err = request_irq(adapter->msix_entries[vector].vector,
e1000_msix_other, 0, netdev->name, netdev);
if (err)
- goto out;
+ return err;
e1000_configure_msix(adapter);
+
return 0;
-out:
- return err;
}
/**
@@ -2162,13 +2198,13 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
/**
* e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring
*
* Return 0 on success, negative on failure
**/
-int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
+int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
{
- struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_adapter *adapter = tx_ring->adapter;
int err = -ENOMEM, size;
size = sizeof(struct e1000_buffer) * tx_ring->count;
@@ -2196,13 +2232,13 @@ err:
/**
* e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
+ * @rx_ring: Rx descriptor ring
*
* Returns 0 on success, negative on failure
**/
-int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
+int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
{
- struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_adapter *adapter = rx_ring->adapter;
struct e1000_buffer *buffer_info;
int i, size, desc_len, err = -ENOMEM;
@@ -2249,18 +2285,18 @@ err:
/**
* e1000_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring
**/
-static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
+static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
{
- struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_adapter *adapter = tx_ring->adapter;
struct e1000_buffer *buffer_info;
unsigned long size;
unsigned int i;
for (i = 0; i < tx_ring->count; i++) {
buffer_info = &tx_ring->buffer_info[i];
- e1000_put_txbuf(adapter, buffer_info);
+ e1000_put_txbuf(tx_ring, buffer_info);
}
netdev_reset_queue(adapter->netdev);
@@ -2272,22 +2308,22 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- writel(0, adapter->hw.hw_addr + tx_ring->head);
- writel(0, adapter->hw.hw_addr + tx_ring->tail);
+ writel(0, tx_ring->head);
+ writel(0, tx_ring->tail);
}
/**
* e1000e_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring
*
* Free all transmit software resources
**/
-void e1000e_free_tx_resources(struct e1000_adapter *adapter)
+void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
{
+ struct e1000_adapter *adapter = tx_ring->adapter;
struct pci_dev *pdev = adapter->pdev;
- struct e1000_ring *tx_ring = adapter->tx_ring;
- e1000_clean_tx_ring(adapter);
+ e1000_clean_tx_ring(tx_ring);
vfree(tx_ring->buffer_info);
tx_ring->buffer_info = NULL;
@@ -2299,18 +2335,17 @@ void e1000e_free_tx_resources(struct e1000_adapter *adapter)
/**
* e1000e_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
+ * @rx_ring: Rx descriptor ring
*
* Free all receive software resources
**/
-
-void e1000e_free_rx_resources(struct e1000_adapter *adapter)
+void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
{
+ struct e1000_adapter *adapter = rx_ring->adapter;
struct pci_dev *pdev = adapter->pdev;
- struct e1000_ring *rx_ring = adapter->rx_ring;
int i;
- e1000_clean_rx_ring(adapter);
+ e1000_clean_rx_ring(rx_ring);
for (i = 0; i < rx_ring->count; i++)
kfree(rx_ring->buffer_info[i].ps_pages);
@@ -2346,7 +2381,7 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
unsigned int retval = itr_setting;
if (packets == 0)
- goto update_itr_done;
+ return itr_setting;
switch (itr_setting) {
case lowest_latency:
@@ -2381,7 +2416,6 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
break;
}
-update_itr_done:
return retval;
}
@@ -2464,13 +2498,19 @@ set_itr_now:
**/
static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
- adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+ int size = sizeof(struct e1000_ring);
+
+ adapter->tx_ring = kzalloc(size, GFP_KERNEL);
if (!adapter->tx_ring)
goto err;
+ adapter->tx_ring->count = adapter->tx_ring_count;
+ adapter->tx_ring->adapter = adapter;
- adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+ adapter->rx_ring = kzalloc(size, GFP_KERNEL);
if (!adapter->rx_ring)
goto err;
+ adapter->rx_ring->count = adapter->rx_ring_count;
+ adapter->rx_ring->adapter = adapter;
return 0;
err:
@@ -2498,10 +2538,10 @@ static int e1000_clean(struct napi_struct *napi, int budget)
!(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
goto clean_rx;
- tx_cleaned = e1000_clean_tx_irq(adapter);
+ tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
clean_rx:
- adapter->clean_rx(adapter, &work_done, budget);
+ adapter->clean_rx(adapter->rx_ring, &work_done, budget);
if (!tx_cleaned)
work_done = budget;
@@ -2746,8 +2786,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct e1000_ring *tx_ring = adapter->tx_ring;
u64 tdba;
- u32 tdlen, tctl, tipg, tarc;
- u32 ipgr1, ipgr2;
+ u32 tdlen, tarc;
/* Setup the HW Tx Head and Tail descriptor pointers */
tdba = tx_ring->dma;
@@ -2757,20 +2796,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(TDLEN, tdlen);
ew32(TDH, 0);
ew32(TDT, 0);
- tx_ring->head = E1000_TDH;
- tx_ring->tail = E1000_TDT;
-
- /* Set the default values for the Tx Inter Packet Gap timer */
- tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */
- ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */
- ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */
-
- if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
- ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */
-
- tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
- tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
- ew32(TIPG, tipg);
+ tx_ring->head = adapter->hw.hw_addr + E1000_TDH;
+ tx_ring->tail = adapter->hw.hw_addr + E1000_TDT;
/* Set the Tx Interrupt Delay register */
ew32(TIDV, adapter->tx_int_delay);
@@ -2793,15 +2820,9 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
*/
txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
ew32(TXDCTL(0), txdctl);
- /* erratum work around: set txdctl the same for both queues */
- ew32(TXDCTL(1), txdctl);
}
-
- /* Program the Transmit Control Register */
- tctl = er32(TCTL);
- tctl &= ~E1000_TCTL_CT;
- tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
- (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+ /* erratum work around: set txdctl the same for both queues */
+ ew32(TXDCTL(1), er32(TXDCTL(0)));
if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
tarc = er32(TARC(0));
@@ -2834,9 +2855,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
/* enable Report Status bit */
adapter->txd_cmd |= E1000_TXD_CMD_RS;
- ew32(TCTL, tctl);
-
- e1000e_config_collision_dist(hw);
+ hw->mac.ops.config_collision_dist(hw);
}
/**
@@ -2944,8 +2963,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
* per packet.
*/
pages = PAGE_USE_COUNT(adapter->netdev->mtu);
- if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
- (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
+ if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
adapter->rx_ps_pages = pages;
else
adapter->rx_ps_pages = 0;
@@ -2982,6 +3000,22 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
ew32(PSRCTL, psrctl);
}
+ /* This is useful for sniffing bad packets. */
+ if (adapter->netdev->features & NETIF_F_RXALL) {
+ /* UPE and MPE will be handled by normal PROMISC logic
+ * in e1000e_set_rx_mode */
+ rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
+ E1000_RCTL_BAM | /* RX All Bcast Pkts */
+ E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
+
+ rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
+ E1000_RCTL_DPF | /* Allow filtered pause */
+ E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
+ /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
+ * and that breaks VLANs.
+ */
+ }
+
ew32(RFCTL, rfctl);
ew32(RCTL, rctl);
/* just started the receive unit, no need to restart */
@@ -3072,8 +3106,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(RDLEN, rdlen);
ew32(RDH, 0);
ew32(RDT, 0);
- rx_ring->head = E1000_RDH;
- rx_ring->tail = E1000_RDT;
+ rx_ring->head = adapter->hw.hw_addr + E1000_RDH;
+ rx_ring->tail = adapter->hw.hw_addr + E1000_RDT;
/* Enable Receive Checksum Offload for TCP and UDP */
rxcsum = er32(RXCSUM);
@@ -3092,23 +3126,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
}
ew32(RXCSUM, rxcsum);
- /*
- * Enable early receives on supported devices, only takes effect when
- * packet size is equal or larger than the specified value (in 8 byte
- * units), e.g. using jumbo frames when setting to E1000_ERT_2048
- */
- if ((adapter->flags & FLAG_HAS_ERT) ||
- (adapter->hw.mac.type == e1000_pch2lan)) {
+ if (adapter->hw.mac.type == e1000_pch2lan) {
+ /*
+ * With jumbo frames, excessive C-state transition
+ * latencies result in dropped transactions.
+ */
if (adapter->netdev->mtu > ETH_DATA_LEN) {
u32 rxdctl = er32(RXDCTL(0));
ew32(RXDCTL(0), rxdctl | 0x3);
- if (adapter->flags & FLAG_HAS_ERT)
- ew32(ERT, E1000_ERT_2048 | (1 << 13));
- /*
- * With jumbo frames and early-receive enabled,
- * excessive C-state transition latencies result in
- * dropped transactions.
- */
pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
} else {
pm_qos_update_request(&adapter->netdev->pm_qos_req,
@@ -3237,6 +3262,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
e1000e_vlan_filter_disable(adapter);
} else {
int count;
+
if (netdev->flags & IFF_ALLMULTI) {
rctl |= E1000_RCTL_MPE;
} else {
@@ -3268,22 +3294,62 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
e1000e_vlan_strip_disable(adapter);
}
+static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc, rxcsum;
+ int i;
+ static const u32 rsskey[10] = {
+ 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0,
+ 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe
+ };
+
+ /* Fill out hash function seed */
+ for (i = 0; i < 10; i++)
+ ew32(RSSRK(i), rsskey[i]);
+
+ /* Direct all traffic to queue 0 */
+ for (i = 0; i < 32; i++)
+ ew32(RETA(i), 0);
+
+ /*
+ * Disable raw packet checksumming so that RSS hash is placed in
+ * descriptor on writeback.
+ */
+ rxcsum = er32(RXCSUM);
+ rxcsum |= E1000_RXCSUM_PCSD;
+
+ ew32(RXCSUM, rxcsum);
+
+ mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
+ E1000_MRQC_RSS_FIELD_IPV4_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6 |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
+
+ ew32(MRQC, mrqc);
+}
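The RETA loop above implements the "Direct all traffic to queue 0" comment: whichever entry the hardware selects from the redirection table, a zeroed table always yields queue 0. A toy lookup illustrating that; the 128-entry table size and hash-modulo indexing are assumptions made for the sketch, not taken from the driver:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define RETA_ENTRIES 128   /* assumed: 32 registers x 4 entries each */

/* Hypothetical lookup: index the redirection table with the low bits of
 * the 32-bit RSS hash. With every entry zeroed, any hash maps to queue 0. */
static uint8_t rss_queue(const uint8_t *reta, uint32_t rss_hash)
{
    return reta[rss_hash % RETA_ENTRIES];
}

int main(void)
{
    uint8_t reta[RETA_ENTRIES];
    memset(reta, 0, sizeof(reta));      /* mirrors the ew32(RETA(i), 0) loop */

    printf("hash 0x12345678 -> queue %u\n", (unsigned)rss_queue(reta, 0x12345678));
    printf("hash 0xdeadbeef -> queue %u\n", (unsigned)rss_queue(reta, 0xdeadbeef));
    return 0;
}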
+
/**
* e1000_configure - configure the hardware for Rx and Tx
* @adapter: private board structure
**/
static void e1000_configure(struct e1000_adapter *adapter)
{
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+
e1000e_set_rx_mode(adapter->netdev);
e1000_restore_vlan(adapter);
e1000_init_manageability_pt(adapter);
e1000_configure_tx(adapter);
+
+ if (adapter->netdev->features & NETIF_F_RXHASH)
+ e1000e_setup_rss_hash(adapter);
e1000_setup_rctl(adapter);
e1000_configure_rx(adapter);
- adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring),
- GFP_KERNEL);
+ adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
}
/**
@@ -3379,9 +3445,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
* if short on Rx space, Rx wins and must trump Tx
* adjustment or use Early Receive if available
*/
- if ((pba < min_rx_space) &&
- (!(adapter->flags & FLAG_HAS_ERT)))
- /* ERT enabled in e1000_configure_rx */
+ if (pba < min_rx_space)
pba = min_rx_space;
}
@@ -3395,26 +3459,29 @@ void e1000e_reset(struct e1000_adapter *adapter)
* (or the size used for early receive) above it in the Rx FIFO.
* Set it to the lower of:
* - 90% of the Rx FIFO size, and
- * - the full Rx FIFO size minus the early receive size (for parts
- * with ERT support assuming ERT set to E1000_ERT_2048), or
* - the full Rx FIFO size minus one full frame
*/
if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
fc->pause_time = 0xFFFF;
else
fc->pause_time = E1000_FC_PAUSE_TIME;
- fc->send_xon = 1;
+ fc->send_xon = true;
fc->current_mode = fc->requested_mode;
switch (hw->mac.type) {
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ pba = 14;
+ ew32(PBA, pba);
+ fc->high_water = 0x2800;
+ fc->low_water = fc->high_water - 8;
+ break;
+ }
+ /* fall-through */
default:
- if ((adapter->flags & FLAG_HAS_ERT) &&
- (adapter->netdev->mtu > ETH_DATA_LEN))
- hwm = min(((pba << 10) * 9 / 10),
- ((pba << 10) - (E1000_ERT_2048 << 3)));
- else
- hwm = min(((pba << 10) * 9 / 10),
- ((pba << 10) - adapter->max_frame_size));
+ hwm = min(((pba << 10) * 9 / 10),
+ ((pba << 10) - adapter->max_frame_size));
fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
fc->low_water = fc->high_water - 8;
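With the early-receive case removed, the watermark math reduces to: high water is the lower of 90% of the Rx packet buffer and the buffer minus one full frame, rounded to 8-byte granularity, and low water sits 8 bytes below it. A standalone sketch of that arithmetic — the ~7 mask stands in for E1000_FCRTH_RTH, and the sample PBA/frame values are arbitrary:

#include <stdio.h>
#include <stdint.h>

struct fc_watermarks { uint32_t high; uint32_t low; };

/* pba_kb: Rx packet buffer allocation in KB; max_frame: max frame size in bytes */
static struct fc_watermarks calc_watermarks(uint32_t pba_kb, uint32_t max_frame)
{
    uint32_t pba_bytes = pba_kb << 10;
    uint32_t hwm = pba_bytes * 9 / 10;          /* 90% of the Rx FIFO */
    if (pba_bytes - max_frame < hwm)
        hwm = pba_bytes - max_frame;            /* leave room for one full frame */

    struct fc_watermarks fc;
    fc.high = hwm & ~7u;                        /* 8-byte granularity */
    fc.low  = fc.high - 8;
    return fc;
}

int main(void)
{
    struct fc_watermarks fc = calc_watermarks(20, 1522);  /* e.g. 20 KB PBA */
    printf("high water: %u bytes, low water: %u bytes\n",
           (unsigned)fc.high, (unsigned)fc.low);
    return 0;
}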
@@ -3447,11 +3514,10 @@ void e1000e_reset(struct e1000_adapter *adapter)
/*
* Disable Adaptive Interrupt Moderation if 2 full packets cannot
- * fit in receive buffer and early-receive not supported.
+ * fit in receive buffer.
*/
if (adapter->itr_setting & 0x3) {
- if (((adapter->max_frame_size * 2) > (pba << 10)) &&
- !(adapter->flags & FLAG_HAS_ERT)) {
+ if ((adapter->max_frame_size * 2) > (pba << 10)) {
if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
dev_info(&adapter->pdev->dev,
"Interrupt Throttle Rate turned off\n");
@@ -3593,8 +3659,8 @@ void e1000e_down(struct e1000_adapter *adapter)
spin_unlock(&adapter->stats64_lock);
e1000e_flush_descriptors(adapter);
- e1000_clean_tx_ring(adapter);
- e1000_clean_rx_ring(adapter);
+ e1000_clean_tx_ring(adapter->tx_ring);
+ e1000_clean_rx_ring(adapter->rx_ring);
adapter->link_speed = 0;
adapter->link_duplex = 0;
@@ -3634,6 +3700,8 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
adapter->rx_ps_bsize0 = 128;
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ adapter->tx_ring_count = E1000_DEFAULT_TXD;
+ adapter->rx_ring_count = E1000_DEFAULT_RXD;
spin_lock_init(&adapter->stats64_lock);
@@ -3721,8 +3789,9 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_MSI_TEST_FAILED) {
adapter->int_mode = E1000E_INT_MODE_LEGACY;
e_info("MSI interrupt test failed, using legacy interrupt.\n");
- } else
+ } else {
e_dbg("MSI interrupt test succeeded!\n");
+ }
free_irq(adapter->pdev->irq, netdev);
pci_disable_msi(adapter->pdev);
@@ -3792,12 +3861,12 @@ static int e1000_open(struct net_device *netdev)
netif_carrier_off(netdev);
/* allocate transmit descriptors */
- err = e1000e_setup_tx_resources(adapter);
+ err = e1000e_setup_tx_resources(adapter->tx_ring);
if (err)
goto err_setup_tx;
/* allocate receive descriptors */
- err = e1000e_setup_rx_resources(adapter);
+ err = e1000e_setup_rx_resources(adapter->rx_ring);
if (err)
goto err_setup_rx;
@@ -3817,9 +3886,8 @@ static int e1000_open(struct net_device *netdev)
E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
e1000_update_mng_vlan(adapter);
- /* DMA latency requirement to workaround early-receive/jumbo issue */
- if ((adapter->flags & FLAG_HAS_ERT) ||
- (adapter->hw.mac.type == e1000_pch2lan))
+ /* DMA latency requirement to work around jumbo issue */

+ if (adapter->hw.mac.type == e1000_pch2lan)
pm_qos_add_request(&adapter->netdev->pm_qos_req,
PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
@@ -3873,9 +3941,9 @@ static int e1000_open(struct net_device *netdev)
err_req_irq:
e1000e_release_hw_control(adapter);
e1000_power_down_phy(adapter);
- e1000e_free_rx_resources(adapter);
+ e1000e_free_rx_resources(adapter->rx_ring);
err_setup_rx:
- e1000e_free_tx_resources(adapter);
+ e1000e_free_tx_resources(adapter->tx_ring);
err_setup_tx:
e1000e_reset(adapter);
pm_runtime_put_sync(&pdev->dev);
@@ -3911,8 +3979,8 @@ static int e1000_close(struct net_device *netdev)
}
e1000_power_down_phy(adapter);
- e1000e_free_tx_resources(adapter);
- e1000e_free_rx_resources(adapter);
+ e1000e_free_tx_resources(adapter->tx_ring);
+ e1000e_free_rx_resources(adapter->rx_ring);
/*
* kill manageability vlan ID if supported, but not if a vlan with
@@ -3930,8 +3998,7 @@ static int e1000_close(struct net_device *netdev)
!test_bit(__E1000_TESTING, &adapter->state))
e1000e_release_hw_control(adapter);
- if ((adapter->flags & FLAG_HAS_ERT) ||
- (adapter->hw.mac.type == e1000_pch2lan))
+ if (adapter->hw.mac.type == e1000_pch2lan)
pm_qos_remove_request(&adapter->netdev->pm_qos_req);
pm_runtime_put_sync(&pdev->dev);
@@ -4566,13 +4633,12 @@ link_up:
#define E1000_TX_FLAGS_VLAN 0x00000002
#define E1000_TX_FLAGS_TSO 0x00000004
#define E1000_TX_FLAGS_IPV4 0x00000008
+#define E1000_TX_FLAGS_NO_FCS 0x00000010
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT 16
-static int e1000_tso(struct e1000_adapter *adapter,
- struct sk_buff *skb)
+static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
{
- struct e1000_ring *tx_ring = adapter->tx_ring;
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
unsigned int i;
@@ -4641,9 +4707,9 @@ static int e1000_tso(struct e1000_adapter *adapter,
return 1;
}
-static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
+static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
{
- struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_adapter *adapter = tx_ring->adapter;
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
unsigned int i;
@@ -4704,12 +4770,11 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
#define E1000_MAX_PER_TXD 8192
#define E1000_MAX_TXD_PWR 12
-static int e1000_tx_map(struct e1000_adapter *adapter,
- struct sk_buff *skb, unsigned int first,
- unsigned int max_per_txd, unsigned int nr_frags,
- unsigned int mss)
+static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
+ unsigned int first, unsigned int max_per_txd,
+ unsigned int nr_frags, unsigned int mss)
{
- struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_adapter *adapter = tx_ring->adapter;
struct pci_dev *pdev = adapter->pdev;
struct e1000_buffer *buffer_info;
unsigned int len = skb_headlen(skb);
@@ -4795,16 +4860,15 @@ dma_error:
i += tx_ring->count;
i--;
buffer_info = &tx_ring->buffer_info[i];
- e1000_put_txbuf(adapter, buffer_info);
+ e1000_put_txbuf(tx_ring, buffer_info);
}
return 0;
}
-static void e1000_tx_queue(struct e1000_adapter *adapter,
- int tx_flags, int count)
+static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
{
- struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_adapter *adapter = tx_ring->adapter;
struct e1000_tx_desc *tx_desc = NULL;
struct e1000_buffer *buffer_info;
u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -4829,6 +4893,9 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
}
+ if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
+ txd_lower &= ~(E1000_TXD_CMD_IFCS);
+
i = tx_ring->next_to_use;
do {
@@ -4846,6 +4913,10 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+ /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
+ if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
+ tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
+
/*
* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -4857,9 +4928,9 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
tx_ring->next_to_use = i;
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- e1000e_update_tdt_wa(adapter, i);
+ e1000e_update_tdt_wa(tx_ring, i);
else
- writel(i, adapter->hw.hw_addr + tx_ring->tail);
+ writel(i, tx_ring->tail);
/*
* we need this if more than one processor can write to our tail
@@ -4907,11 +4978,11 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
return 0;
}
-static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
+static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
{
- struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_adapter *adapter = tx_ring->adapter;
- netif_stop_queue(netdev);
+ netif_stop_queue(adapter->netdev);
/*
* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
@@ -4923,25 +4994,23 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
* We need to check again in a case another CPU has just
* made room available.
*/
- if (e1000_desc_unused(adapter->tx_ring) < size)
+ if (e1000_desc_unused(tx_ring) < size)
return -EBUSY;
/* A reprieve! */
- netif_start_queue(netdev);
+ netif_start_queue(adapter->netdev);
++adapter->restart_queue;
return 0;
}
-static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
+static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
{
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- if (e1000_desc_unused(adapter->tx_ring) >= size)
+ if (e1000_desc_unused(tx_ring) >= size)
return 0;
- return __e1000_maybe_stop_tx(netdev, size);
+ return __e1000_maybe_stop_tx(tx_ring, size);
}
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
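TXD_USE_COUNT above gives a conservative upper bound on the number of Tx descriptors needed for a buffer: its length shifted right by X, plus one. A quick standalone check, using the E1000_MAX_TXD_PWR value of 12 defined earlier in this file:

#include <stdio.h>

#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)

int main(void)
{
    unsigned int lengths[] = { 60, 1514, 4096, 9018, 60000 };
    unsigned int i;

    for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++)
        printf("len %5u -> %u descriptor(s)\n",
               lengths[i], TXD_USE_COUNT(lengths[i], 12));
    return 0;
}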
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
@@ -4995,7 +5064,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (skb->data_len && (hdr_len == len)) {
unsigned int pull_size;
- pull_size = min((unsigned int)4, skb->data_len);
+ pull_size = min_t(unsigned int, 4, skb->data_len);
if (!__pskb_pull_tail(skb, pull_size)) {
e_err("__pskb_pull_tail failed.\n");
dev_kfree_skb_any(skb);
@@ -5024,7 +5093,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* need: count + 2 desc gap to keep tail from touching
* head, otherwise try next time
*/
- if (e1000_maybe_stop_tx(netdev, count + 2))
+ if (e1000_maybe_stop_tx(tx_ring, count + 2))
return NETDEV_TX_BUSY;
if (vlan_tx_tag_present(skb)) {
@@ -5034,7 +5103,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
first = tx_ring->next_to_use;
- tso = e1000_tso(adapter, skb);
+ tso = e1000_tso(tx_ring, skb);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -5042,7 +5111,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (tso)
tx_flags |= E1000_TX_FLAGS_TSO;
- else if (e1000_tx_csum(adapter, skb))
+ else if (e1000_tx_csum(tx_ring, skb))
tx_flags |= E1000_TX_FLAGS_CSUM;
/*
@@ -5053,13 +5122,16 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (skb->protocol == htons(ETH_P_IP))
tx_flags |= E1000_TX_FLAGS_IPV4;
+ if (unlikely(skb->no_fcs))
+ tx_flags |= E1000_TX_FLAGS_NO_FCS;
+
/* if count is 0 then mapping error has occurred */
- count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
+ count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
if (count) {
netdev_sent_queue(netdev, skb->len);
- e1000_tx_queue(adapter, tx_flags, count);
+ e1000_tx_queue(tx_ring, tx_flags, count);
/* Make sure there is space in the ring for the next send. */
- e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
+ e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
} else {
dev_kfree_skb_any(skb);
@@ -5165,10 +5237,22 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* Jumbo frame support */
- if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
- !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
- e_err("Jumbo Frames not supported.\n");
- return -EINVAL;
+ if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
+ if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+ e_err("Jumbo Frames not supported.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * IP payload checksum (enabled with jumbos/packet-split when
+ * Rx checksum is enabled) and generation of RSS hash is
+ * mutually exclusive in the hardware.
+ */
+ if ((netdev->features & NETIF_F_RXCSUM) &&
+ (netdev->features & NETIF_F_RXHASH)) {
+ e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disable one of the receive offload features before enabling jumbos.\n");
+ return -EINVAL;
+ }
}
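The jumbo test itself is unchanged: the requested MTU plus the 14-byte Ethernet header and 4-byte FCS, compared against a standard 1518-byte frame. A tiny sketch of that comparison using the usual Linux values for ETH_HLEN, ETH_FCS_LEN and ETH_FRAME_LEN:

#include <stdio.h>

#define ETH_HLEN      14
#define ETH_FCS_LEN    4
#define ETH_FRAME_LEN 1514

static int is_jumbo(int new_mtu)
{
    int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
    return max_frame > ETH_FRAME_LEN + ETH_FCS_LEN;   /* i.e. > 1518 bytes */
}

int main(void)
{
    printf("MTU 1500 jumbo? %d\n", is_jumbo(1500));   /* 0: 1518-byte frame */
    printf("MTU 9000 jumbo? %d\n", is_jumbo(9000));   /* 1: 9018-byte frame */
    return 0;
}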
/* Supported frame sizes */
@@ -5322,7 +5406,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
/* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
if (retval)
- goto out;
+ goto release;
/* copy MAC MTA to PHY MTA - only needed for pchlan */
for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
@@ -5366,7 +5450,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
if (retval)
e_err("Could not set PHY Host Wakeup bit\n");
-out:
+release:
hw->phy.ops.release(hw);
return retval;
@@ -5908,7 +5992,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
ret_val = e1000_read_pba_string_generic(hw, pba_str,
E1000_PBANUM_LENGTH);
if (ret_val)
- strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
+ strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
e_info("MAC: %d, PHY: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, pba_str);
}
@@ -5923,7 +6007,8 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
return;
ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
- if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
+ le16_to_cpus(&buf);
+ if (!ret_val && (!(buf & (1 << 0)))) {
/* Deep Smart Power Down (DSPD) */
dev_warn(&adapter->pdev->dev,
"Warning: detected DSPD enabled in EEPROM\n");
@@ -5931,7 +6016,7 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
}
static int e1000_set_features(struct net_device *netdev,
- netdev_features_t features)
+ netdev_features_t features)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
netdev_features_t changed = features ^ netdev->features;
@@ -5940,9 +6025,37 @@ static int e1000_set_features(struct net_device *netdev,
adapter->flags |= FLAG_TSO_FORCE;
if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
- NETIF_F_RXCSUM)))
+ NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
+ NETIF_F_RXALL)))
return 0;
+ /*
+ * IP payload checksum (enabled with jumbos/packet-split when Rx
+ * checksum is enabled) and generation of RSS hash are mutually
+ * exclusive in the hardware.
+ */
+ if (adapter->rx_ps_pages &&
+ (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
+ e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. Disable jumbos or enable only one of the receive offload features.\n");
+ return -EINVAL;
+ }
+
+ if (changed & NETIF_F_RXFCS) {
+ if (features & NETIF_F_RXFCS) {
+ adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
+ } else {
+ /* We need to take it back to defaults, which might mean
+ * stripping is still disabled at the adapter level.
+ */
+ if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
+ adapter->flags2 |= FLAG2_CRC_STRIPPING;
+ else
+ adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
+ }
+ }
+
+ netdev->features = features;
+
if (netif_running(netdev))
e1000e_reinit_locked(adapter);
else
@@ -5991,7 +6104,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
resource_size_t mmio_start, mmio_len;
resource_size_t flash_start, flash_len;
-
static int cards_found;
u16 aspm_disable_flag = 0;
int i, err, pci_using_dac;
@@ -6087,7 +6199,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
e1000e_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
- strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+ strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len;
@@ -6124,7 +6236,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->hw.phy.ms_type = e1000_ms_hw_default;
}
- if (e1000_check_reset_block(&adapter->hw))
+ if (hw->phy.ops.check_reset_block(hw))
e_info("PHY reset is blocked due to SOL/IDER session.\n");
/* Set initial default active device features */
@@ -6133,11 +6245,15 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
NETIF_F_HW_VLAN_TX |
NETIF_F_TSO |
NETIF_F_TSO6 |
+ NETIF_F_RXHASH |
NETIF_F_RXCSUM |
NETIF_F_HW_CSUM);
/* Set user-changeable features (subset of all device features) */
netdev->hw_features = netdev->features;
+ netdev->hw_features |= NETIF_F_RXFCS;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
+ netdev->hw_features |= NETIF_F_RXALL;
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
netdev->features |= NETIF_F_HW_VLAN_FILTER;
@@ -6231,11 +6347,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
(adapter->hw.bus.func == 1))
- e1000_read_nvm(&adapter->hw,
- NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+ e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B,
+ 1, &eeprom_data);
else
- e1000_read_nvm(&adapter->hw,
- NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+ e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A,
+ 1, &eeprom_data);
}
/* fetch WoL from EEPROM */
@@ -6268,7 +6384,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_get_hw_control(adapter);
- strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
+ strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
err = register_netdev(netdev);
if (err)
goto err_register;
@@ -6287,7 +6403,7 @@ err_register:
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_release_hw_control(adapter);
err_eeprom:
- if (!e1000_check_reset_block(&adapter->hw))
+ if (!hw->phy.ops.check_reset_block(hw))
e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
kfree(adapter->tx_ring);
@@ -6449,7 +6565,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
- { } /* terminate list */
+ { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
@@ -6468,7 +6584,9 @@ static struct pci_driver e1000_driver = {
.probe = e1000_probe,
.remove = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
- .driver.pm = &e1000_pm_ops,
+ .driver = {
+ .pm = &e1000_pm_ops,
+ },
#endif
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
@@ -6485,7 +6603,7 @@ static int __init e1000_init_module(void)
int ret;
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
- pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
+ pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
@@ -6510,4 +6628,4 @@ MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-/* e1000_main.c */
+/* netdev.c */
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
new file mode 100644
index 000000000000..a969f1af1b4e
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -0,0 +1,643 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+/**
+ * e1000_raise_eec_clk - Raise EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Enable/Raise the EEPROM clock bit.
+ **/
+static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ *eecd = *eecd | E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ e1e_flush();
+ udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_lower_eec_clk - Lower EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Clear/Lower the EEPROM clock bit.
+ **/
+static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ *eecd = *eecd & ~E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ e1e_flush();
+ udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
+ * @hw: pointer to the HW structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the EEPROM. So, the value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = er32(EECD);
+ u32 mask;
+
+ mask = 0x01 << (count - 1);
+ if (nvm->type == e1000_nvm_eeprom_spi)
+ eecd |= E1000_EECD_DO;
+
+ do {
+ eecd &= ~E1000_EECD_DI;
+
+ if (data & mask)
+ eecd |= E1000_EECD_DI;
+
+ ew32(EECD, eecd);
+ e1e_flush();
+
+ udelay(nvm->delay_usec);
+
+ e1000_raise_eec_clk(hw, &eecd);
+ e1000_lower_eec_clk(hw, &eecd);
+
+ mask >>= 1;
+ } while (mask);
+
+ eecd &= ~E1000_EECD_DI;
+ ew32(EECD, eecd);
+}
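e1000_shift_out_eec_bits clocks the bits of "data" onto the EEPROM's DI pin most-significant bit first, pulsing SK around each one. Stripped of register I/O and delays, the bit ordering looks like this (the printf stands in for driving DI and toggling the clock):

#include <stdio.h>
#include <stdint.h>

/* Emit 'count' bits of 'data', MSB first, in the same order the driver
 * presents them on the EEPROM DI pin. */
static void shift_out_bits(uint16_t data, uint16_t count)
{
    uint32_t mask = 1u << (count - 1);

    do {
        int bit = (data & mask) ? 1 : 0;
        printf("%d", bit);      /* in the driver: set/clear DI, pulse SK */
        mask >>= 1;
    } while (mask);
    printf("\n");
}

int main(void)
{
    shift_out_bits(0x03, 8);    /* e.g. the standard 8-bit SPI read opcode */
    shift_out_bits(0x0040, 16); /* e.g. a 16-bit address word */
    return 0;
}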
+
+/**
+ * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to the HW structure
+ * @count: number of bits to shift in
+ *
+ * In order to read a register from the EEPROM, we need to shift 'count' bits
+ * in from the EEPROM. Bits are "shifted in" by raising the clock input to
+ * the EEPROM (setting the SK bit), and then reading the value of the data out
+ * "DO" bit. During this "shifting in" process the data in "DI" bit should
+ * always be clear.
+ **/
+static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+ u32 eecd;
+ u32 i;
+ u16 data;
+
+ eecd = er32(EECD);
+
+ eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+ data = 0;
+
+ for (i = 0; i < count; i++) {
+ data <<= 1;
+ e1000_raise_eec_clk(hw, &eecd);
+
+ eecd = er32(EECD);
+
+ eecd &= ~E1000_EECD_DI;
+ if (eecd & E1000_EECD_DO)
+ data |= 1;
+
+ e1000_lower_eec_clk(hw, &eecd);
+ }
+
+ return data;
+}
+
+/**
+ * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ * @hw: pointer to the HW structure
+ * @ee_reg: EEPROM flag for polling
+ *
+ * Polls the EEPROM status bit for either read or write completion based
+ * upon the value of 'ee_reg'.
+ **/
+s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+{
+ u32 attempts = 100000;
+ u32 i, reg = 0;
+
+ for (i = 0; i < attempts; i++) {
+ if (ee_reg == E1000_NVM_POLL_READ)
+ reg = er32(EERD);
+ else
+ reg = er32(EEWR);
+
+ if (reg & E1000_NVM_RW_REG_DONE)
+ return 0;
+
+ udelay(5);
+ }
+
+ return -E1000_ERR_NVM;
+}
+
+/**
+ * e1000e_acquire_nvm - Generic request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 e1000e_acquire_nvm(struct e1000_hw *hw)
+{
+ u32 eecd = er32(EECD);
+ s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+
+ ew32(EECD, eecd | E1000_EECD_REQ);
+ eecd = er32(EECD);
+
+ while (timeout) {
+ if (eecd & E1000_EECD_GNT)
+ break;
+ udelay(5);
+ eecd = er32(EECD);
+ timeout--;
+ }
+
+ if (!timeout) {
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ e_dbg("Could not acquire NVM grant\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_standby_nvm - Return EEPROM to standby state
+ * @hw: pointer to the HW structure
+ *
+ * Return the EEPROM to a standby state.
+ **/
+static void e1000_standby_nvm(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = er32(EECD);
+
+ if (nvm->type == e1000_nvm_eeprom_spi) {
+ /* Toggle CS to flush commands */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ e1e_flush();
+ udelay(nvm->delay_usec);
+ eecd &= ~E1000_EECD_CS;
+ ew32(EECD, eecd);
+ e1e_flush();
+ udelay(nvm->delay_usec);
+ }
+}
+
+/**
+ * e1000_stop_nvm - Terminate EEPROM command
+ * @hw: pointer to the HW structure
+ *
+ * Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+static void e1000_stop_nvm(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ eecd = er32(EECD);
+ if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+ /* Pull CS high */
+ eecd |= E1000_EECD_CS;
+ e1000_lower_eec_clk(hw, &eecd);
+ }
+}
+
+/**
+ * e1000e_release_nvm - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void e1000e_release_nvm(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ e1000_stop_nvm(hw);
+
+ eecd = er32(EECD);
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+}
+
+/**
+ * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the EEPROM for reading and writing.
+ **/
+static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = er32(EECD);
+ u8 spi_stat_reg;
+
+ if (nvm->type == e1000_nvm_eeprom_spi) {
+ u16 timeout = NVM_MAX_RETRY_SPI;
+
+ /* Clear SK and CS */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ ew32(EECD, eecd);
+ e1e_flush();
+ udelay(1);
+
+ /*
+ * Read "Status Register" repeatedly until the LSB is cleared.
+ * The EEPROM will signal that the command has been completed
+ * by clearing bit 0 of the internal status register. If it's
+ * not cleared within 'timeout', then error out.
+ */
+ while (timeout) {
+ e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+ hw->nvm.opcode_bits);
+ spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+ if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+ break;
+
+ udelay(5);
+ e1000_standby_nvm(hw);
+ timeout--;
+ }
+
+ if (!timeout) {
+ e_dbg("SPI NVM Status error\n");
+ return -E1000_ERR_NVM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, eerd = 0;
+ s32 ret_val = 0;
+
+ /*
+ * A check for invalid values: offset too large, too many words,
+ * too many words for the offset, and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ e_dbg("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) +
+ E1000_NVM_RW_REG_START;
+
+ ew32(EERD, eerd);
+ ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+ if (ret_val)
+ break;
+
+ data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000e_write_nvm_spi - Write to EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes data to EEPROM at offset using SPI interface.
+ *
+ * If e1000e_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val;
+ u16 widx = 0;
+
+ /*
+ * A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ e_dbg("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ while (widx < words) {
+ u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val)
+ goto release;
+
+ e1000_standby_nvm(hw);
+
+ /* Send the WRITE ENABLE command (8 bit opcode) */
+ e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+ nvm->opcode_bits);
+
+ e1000_standby_nvm(hw);
+
+ /*
+ * Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
+ if ((nvm->address_bits == 8) && (offset >= 128))
+ write_opcode |= NVM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+ e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+ nvm->address_bits);
+
+ /* Loop to allow for up to whole page write of eeprom */
+ while (widx < words) {
+ u16 word_out = data[widx];
+ word_out = (word_out >> 8) | (word_out << 8);
+ e1000_shift_out_eec_bits(hw, word_out, 16);
+ widx++;
+
+ if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+ e1000_standby_nvm(hw);
+ break;
+ }
+ }
+ }
+
+ usleep_range(10000, 20000);
+release:
+ nvm->ops.release(hw);
+
+ return ret_val;
+}
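Two details of the SPI write loop are worth isolating: each 16-bit word is byte-swapped so its high byte is shifted out first, and the page write is closed whenever the next byte address lands on a page boundary. A small sketch of just those two calculations; the 32-byte page size is only an example value:

#include <stdio.h>
#include <stdint.h>

/* Swap the bytes of a word so the high byte is shifted out first. */
static uint16_t swap_word(uint16_t w)
{
    return (uint16_t)((w >> 8) | (w << 8));
}

/* True when the byte address after writing word 'widx' of a burst that
 * started at word 'offset' lands on a page boundary. */
static int page_boundary(uint16_t offset, uint16_t widx, uint16_t page_size)
{
    return (((offset + widx) * 2) % page_size) == 0;
}

int main(void)
{
    printf("0x1234 on the wire: 0x%04x\n", (unsigned)swap_word(0x1234)); /* 0x3412 */

    /* With a 32-byte page, a burst starting at word offset 14 must be
     * closed after two words (bytes 28..31). */
    for (uint16_t widx = 1; widx <= 4; widx++)
        printf("after word %u: boundary? %d\n",
               (unsigned)widx, page_boundary(14, widx, 32));
    return 0;
}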
+
+/**
+ * e1000_read_pba_string_generic - Read device part number
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ **/
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ if (pba_num == NULL) {
+ e_dbg("PBA string buffer was null\n");
+ return -E1000_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ /*
+ * if nvm_data is not ptr guard the PBA must be in legacy format which
+ * means pba_ptr is actually our second data word for the PBA number
+ * and we can decode it into an ascii string
+ */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ e_dbg("NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (pba_num_size < 11) {
+ e_dbg("PBA string buffer too small\n");
+ return E1000_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (nvm_data >> 12) & 0xF;
+ pba_num[1] = (nvm_data >> 8) & 0xF;
+ pba_num[2] = (nvm_data >> 4) & 0xF;
+ pba_num[3] = nvm_data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ return 0;
+ }
+
+ ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ e_dbg("NVM PBA number section invalid length\n");
+ return -E1000_ERR_NVM_PBA_SECTION;
+ }
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ e_dbg("PBA string buffer too small\n");
+ return -E1000_ERR_NO_SPACE;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+ pba_num[offset * 2] = (u8)(nvm_data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+ return 0;
+}
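For the legacy (non-string) format, the PBA is rebuilt as an 11-character hex string: four nibbles of the first word, the top two nibbles of the pointer word, a dash, a literal zero, then the low two nibbles. The conversion can be exercised on its own like this (the sample words are made up):

#include <stdio.h>
#include <stdint.h>

/* Decode a legacy-format PBA number (two 16-bit NVM words) into the
 * "XXXXXX-0XX" form. 'out' must hold at least 11 bytes. */
static void decode_legacy_pba(uint16_t nvm_data, uint16_t pba_ptr, char *out)
{
    int i;

    out[0] = (nvm_data >> 12) & 0xF;
    out[1] = (nvm_data >> 8) & 0xF;
    out[2] = (nvm_data >> 4) & 0xF;
    out[3] = nvm_data & 0xF;
    out[4] = (pba_ptr >> 12) & 0xF;
    out[5] = (pba_ptr >> 8) & 0xF;
    out[6] = '-';
    out[7] = 0;
    out[8] = (pba_ptr >> 4) & 0xF;
    out[9] = pba_ptr & 0xF;
    out[10] = '\0';

    for (i = 0; i < 10; i++) {          /* nibbles to ASCII hex, '-' untouched */
        if (out[i] < 0xA)
            out[i] += '0';
        else if (out[i] < 0x10)
            out[i] += 'A' - 0xA;
    }
}

int main(void)
{
    char pba[11];
    decode_legacy_pba(0xC0D1, 0x2345, pba);
    printf("PBA: %s\n", pba);           /* prints "C0D123-045" */
    return 0;
}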
+
+/**
+ * e1000_read_mac_addr_generic - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the EEPROM and stores the value.
+ * Since devices with two ports use the same EEPROM, we increment the
+ * last bit in the MAC address for the second port.
+ **/
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
+{
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+ rar_high = er32(RAH(0));
+ rar_low = er32(RAL(0));
+
+ for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8));
+
+ for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8));
+
+ for (i = 0; i < ETH_ALEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+ return 0;
+}
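The MAC address is reassembled from the two receive-address registers, low byte first: four bytes from RAL(0) and two from RAH(0). A standalone version of that unpacking with arbitrary register values:

#include <stdio.h>
#include <stdint.h>

/* Unpack a MAC address from the RAL/RAH register pair, low byte first. */
static void mac_from_rar(uint32_t rar_low, uint32_t rar_high, uint8_t mac[6])
{
    int i;

    for (i = 0; i < 4; i++)
        mac[i] = (uint8_t)(rar_low >> (i * 8));
    for (i = 0; i < 2; i++)
        mac[i + 4] = (uint8_t)(rar_high >> (i * 8));
}

int main(void)
{
    uint8_t mac[6];

    /* e.g. RAL = 0x563412A0, RAH = 0x00009A78 -> a0:12:34:56:78:9a */
    mac_from_rar(0x563412A0, 0x00009A78, mac);
    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    return 0;
}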
+
+/**
+ * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+ checksum += nvm_data;
+ }
+
+ if (checksum != (u16)NVM_SUM) {
+ e_dbg("NVM Checksum Invalid\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return 0;
+}
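The checksum convention is that the 16-bit words from offset 0 through the checksum word itself sum (mod 2^16) to NVM_SUM, 0xBABA; e1000e_update_nvm_checksum_generic below simply stores NVM_SUM minus the running sum of the preceding words. A self-contained sketch of both operations over a dummy array — the real region is NVM_CHECKSUM_REG + 1 words, shrunk here to eight for illustration:

#include <stdio.h>
#include <stdint.h>

#define NVM_SUM 0xBABA  /* expected 16-bit sum of the checksummed region */

/* Store a checksum in the last word so all 'nwords' words sum to NVM_SUM. */
static void update_checksum(uint16_t *words, int nwords)
{
    uint16_t sum = 0;
    int i;

    for (i = 0; i < nwords - 1; i++)
        sum += words[i];
    words[nwords - 1] = (uint16_t)(NVM_SUM - sum);
}

/* Return 1 when the region checksums correctly, 0 otherwise. */
static int validate_checksum(const uint16_t *words, int nwords)
{
    uint16_t sum = 0;
    int i;

    for (i = 0; i < nwords; i++)
        sum += words[i];
    return sum == (uint16_t)NVM_SUM;
}

int main(void)
{
    uint16_t nvm[8] = { 0x1234, 0xabcd, 0x0001, 0x8086, 0x10d3, 0x0000, 0xffff };

    update_checksum(nvm, 8);
    printf("checksum word: 0x%04x, valid: %d\n",
           (unsigned)nvm[7], validate_checksum(nvm, 8));
    return 0;
}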
+
+/**
+ * e1000e_update_nvm_checksum_generic - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error while updating checksum.\n");
+ return ret_val;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16)NVM_SUM - checksum;
+ ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
+ if (ret_val)
+ e_dbg("NVM Write Error while updating checksum.\n");
+
+ return ret_val;
+}
+
+/**
+ * e1000e_reload_nvm_generic - Reloads EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ * extended control register.
+ **/
+void e1000e_reload_nvm_generic(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+
+ udelay(10);
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ ew32(CTRL_EXT, ctrl_ext);
+ e1e_flush();
+}
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 20e93b08e7f3..ff796e42c3eb 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -113,11 +113,20 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define MAX_ITR 100000
#define MIN_ITR 100
-/* IntMode (Interrupt Mode)
+/*
+ * IntMode (Interrupt Mode)
+ *
+ * Valid Range: varies depending on kernel configuration & hardware support
+ *
+ * legacy=0, MSI=1, MSI-X=2
*
- * Valid Range: 0 - 2
+ * When MSI/MSI-X support is enabled in kernel-
+ * Default Value: 2 (MSI-X) when supported by hardware, 1 (MSI) otherwise
+ * When MSI/MSI-X support is not enabled in kernel-
+ * Default Value: 0 (legacy)
*
- * Default Value: 2 (MSI-X)
+ * When a mode is specified that is not allowed/supported, it will be
+ * demoted to the most advanced interrupt mode available.
*/
E1000_PARAM(IntMode, "Interrupt Mode");
#define MAX_INTMODE 2
@@ -388,12 +397,33 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
static struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Mode",
- .err = "defaulting to 2 (MSI-X)",
- .def = E1000E_INT_MODE_MSIX,
- .arg = { .r = { .min = MIN_INTMODE,
- .max = MAX_INTMODE } }
+#ifndef CONFIG_PCI_MSI
+ .err = "defaulting to 0 (legacy)",
+ .def = E1000E_INT_MODE_LEGACY,
+ .arg = { .r = { .min = 0,
+ .max = 0 } }
+#endif
};
+#ifdef CONFIG_PCI_MSI
+ if (adapter->flags & FLAG_HAS_MSIX) {
+ opt.err = kstrdup("defaulting to 2 (MSI-X)",
+ GFP_KERNEL);
+ opt.def = E1000E_INT_MODE_MSIX;
+ opt.arg.r.max = E1000E_INT_MODE_MSIX;
+ } else {
+ opt.err = kstrdup("defaulting to 1 (MSI)", GFP_KERNEL);
+ opt.def = E1000E_INT_MODE_MSI;
+ opt.arg.r.max = E1000E_INT_MODE_MSI;
+ }
+
+ if (!opt.err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to allocate memory\n");
+ return;
+ }
+#endif
+
if (num_IntMode > bd) {
unsigned int int_mode = IntMode[bd];
e1000_validate_option(&int_mode, &opt, adapter);
@@ -401,6 +431,10 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
} else {
adapter->int_mode = opt.def;
}
+
+#ifdef CONFIG_PCI_MSI
+ kfree(opt.err);
+#endif
}
{ /* Smart Power Down */
static const struct e1000_option opt = {
@@ -429,10 +463,13 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
if (num_CrcStripping > bd) {
unsigned int crc_stripping = CrcStripping[bd];
e1000_validate_option(&crc_stripping, &opt, adapter);
- if (crc_stripping == OPTION_ENABLED)
+ if (crc_stripping == OPTION_ENABLED) {
adapter->flags2 |= FLAG2_CRC_STRIPPING;
+ adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING;
+ }
} else {
adapter->flags2 |= FLAG2_CRC_STRIPPING;
+ adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING;
}
}
{ /* Kumeran Lock Loss Workaround */
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 8666476cb9be..35b45578c604 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -26,8 +26,6 @@
*******************************************************************************/
-#include <linux/delay.h>
-
#include "e1000.h"
static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
@@ -132,30 +130,30 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
u16 phy_id;
u16 retry_count = 0;
- if (!(phy->ops.read_reg))
- goto out;
+ if (!phy->ops.read_reg)
+ return 0;
while (retry_count < 2) {
ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
if (ret_val)
- goto out;
+ return ret_val;
phy->id = (u32)(phy_id << 16);
udelay(20);
ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
if (ret_val)
- goto out;
+ return ret_val;
phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
- goto out;
+ return 0;
retry_count++;
}
-out:
- return ret_val;
+
+ return 0;
}
/**
@@ -382,29 +380,25 @@ static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
s32 ret_val = 0;
if (!locked) {
- if (!(hw->phy.ops.acquire))
- goto out;
+ if (!hw->phy.ops.acquire)
+ return 0;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
- goto out;
+ return ret_val;
}
- if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ if (offset > MAX_PHY_MULTI_PAGE_REG)
ret_val = e1000e_write_phy_reg_mdic(hw,
IGP01E1000_PHY_PAGE_SELECT,
(u16)offset);
- if (ret_val)
- goto release;
- }
-
- ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
-
-release:
+ if (!ret_val)
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
if (!locked)
hw->phy.ops.release(hw);
-out:
+
return ret_val;
}
@@ -453,30 +447,25 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
s32 ret_val = 0;
if (!locked) {
- if (!(hw->phy.ops.acquire))
- goto out;
+ if (!hw->phy.ops.acquire)
+ return 0;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
- goto out;
+ return ret_val;
}
- if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ if (offset > MAX_PHY_MULTI_PAGE_REG)
ret_val = e1000e_write_phy_reg_mdic(hw,
IGP01E1000_PHY_PAGE_SELECT,
(u16)offset);
- if (ret_val)
- goto release;
- }
-
- ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
-
-release:
+ if (!ret_val)
+ ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS &
+ offset,
+ data);
if (!locked)
hw->phy.ops.release(hw);
-out:
return ret_val;
}
@@ -523,15 +512,16 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
bool locked)
{
u32 kmrnctrlsta;
- s32 ret_val = 0;
if (!locked) {
- if (!(hw->phy.ops.acquire))
- goto out;
+ s32 ret_val = 0;
+
+ if (!hw->phy.ops.acquire)
+ return 0;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
- goto out;
+ return ret_val;
}
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
@@ -547,8 +537,7 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
if (!locked)
hw->phy.ops.release(hw);
-out:
- return ret_val;
+ return 0;
}
/**
@@ -596,15 +585,16 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
bool locked)
{
u32 kmrnctrlsta;
- s32 ret_val = 0;
if (!locked) {
- if (!(hw->phy.ops.acquire))
- goto out;
+ s32 ret_val = 0;
+
+ if (!hw->phy.ops.acquire)
+ return 0;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
- goto out;
+ return ret_val;
}
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
@@ -617,8 +607,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
if (!locked)
hw->phy.ops.release(hw);
-out:
- return ret_val;
+ return 0;
}
/**
@@ -663,17 +652,14 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
/* Enable CRS on Tx. This must be set for half-duplex operation. */
ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
/* Enable downshift */
phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
- ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
-
-out:
- return ret_val;
+ return e1e_wphy(hw, I82577_CFG_REG, phy_data);
}
/**
@@ -1019,12 +1005,12 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
* 1: Rx flow control is enabled (we can receive pause frames
- * but not send pause frames).
+ * but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * but we do not support receiving pause frames).
+ * but we do not support receiving pause frames).
* 3: Both Rx and Tx flow control (symmetric) are enabled.
* other: No software override. The flow control configuration
- * in the EEPROM is used.
+ * in the EEPROM is used.
*/
switch (hw->fc.current_mode) {
case e1000_fc_none:
@@ -1064,8 +1050,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
break;
default:
e_dbg("Flow control param set incorrectly\n");
- ret_val = -E1000_ERR_CONFIG;
- return ret_val;
+ return -E1000_ERR_CONFIG;
}
ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
@@ -1136,13 +1121,12 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
if (phy->autoneg_wait_to_complete) {
ret_val = e1000_wait_autoneg(hw);
if (ret_val) {
- e_dbg("Error while waiting for "
- "autoneg to complete\n");
+ e_dbg("Error while waiting for autoneg to complete\n");
return ret_val;
}
}
- hw->mac.get_link_status = 1;
+ hw->mac.get_link_status = true;
return ret_val;
}
@@ -1186,16 +1170,14 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
* Check link status. Wait up to 100 microseconds for link to become
* valid.
*/
- ret_val = e1000e_phy_has_link_generic(hw,
- COPPER_LINK_UP_LIMIT,
- 10,
- &link);
+ ret_val = e1000e_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
+ &link);
if (ret_val)
return ret_val;
if (link) {
e_dbg("Valid link established!!!\n");
- e1000e_config_collision_dist(hw);
+ hw->mac.ops.config_collision_dist(hw);
ret_val = e1000e_config_fc_after_link_up(hw);
} else {
e_dbg("Unable to establish link!!!\n");
@@ -1251,10 +1233,8 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
if (phy->autoneg_wait_to_complete) {
e_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
- ret_val = e1000e_phy_has_link_generic(hw,
- PHY_FORCE_LIMIT,
- 100000,
- &link);
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
if (ret_val)
return ret_val;
@@ -1262,12 +1242,8 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
e_dbg("Link taking longer than expected.\n");
/* Try once more */
- ret_val = e1000e_phy_has_link_generic(hw,
- PHY_FORCE_LIMIT,
- 100000,
- &link);
- if (ret_val)
- return ret_val;
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
}
return ret_val;
@@ -1401,25 +1377,25 @@ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, PHY_CONTROL, &data);
if (ret_val)
- goto out;
+ return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &data);
ret_val = e1e_wphy(hw, PHY_CONTROL, data);
if (ret_val)
- goto out;
+ return ret_val;
/* Disable MDI-X support for 10/100 */
ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
if (ret_val)
- goto out;
+ return ret_val;
data &= ~IFE_PMC_AUTO_MDIX;
data &= ~IFE_PMC_FORCE_MDIX;
ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data);
if (ret_val)
- goto out;
+ return ret_val;
e_dbg("IFE PMC: %X\n", data);
@@ -1428,27 +1404,22 @@ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
if (phy->autoneg_wait_to_complete) {
e_dbg("Waiting for forced speed/duplex link on IFE phy.\n");
- ret_val = e1000e_phy_has_link_generic(hw,
- PHY_FORCE_LIMIT,
- 100000,
- &link);
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
if (ret_val)
- goto out;
+ return ret_val;
if (!link)
e_dbg("Link taking longer than expected.\n");
/* Try once more */
- ret_val = e1000e_phy_has_link_generic(hw,
- PHY_FORCE_LIMIT,
- 100000,
- &link);
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
if (ret_val)
- goto out;
+ return ret_val;
}
-out:
- return ret_val;
+ return 0;
}
/**
@@ -1506,7 +1477,7 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
e_dbg("Forcing 10mb\n");
}
- e1000e_config_collision_dist(hw);
+ hw->mac.ops.config_collision_dist(hw);
ew32(CTRL, ctrl);
}
@@ -1833,22 +1804,20 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
M88E1000_PSSR_CABLE_LENGTH_SHIFT;
- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
- ret_val = -E1000_ERR_PHY;
- goto out;
- }
+
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
+ return -E1000_ERR_PHY;
phy->min_cable_length = e1000_m88_cable_length_table[index];
phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
-out:
- return ret_val;
+ return 0;
}
/**
@@ -1918,7 +1887,7 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
- return ret_val;
+ return 0;
}
/**
@@ -2073,24 +2042,23 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
if (ret_val)
- goto out;
+ return ret_val;
if (!link) {
e_dbg("Phy info is only valid if link is up\n");
- ret_val = -E1000_ERR_CONFIG;
- goto out;
+ return -E1000_ERR_CONFIG;
}
ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
if (ret_val)
- goto out;
+ return ret_val;
phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
? false : true;
if (phy->polarity_correction) {
ret_val = e1000_check_polarity_ife(hw);
if (ret_val)
- goto out;
+ return ret_val;
} else {
/* Polarity is forced */
phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
@@ -2100,7 +2068,7 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
if (ret_val)
- goto out;
+ return ret_val;
phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false;
@@ -2109,8 +2077,7 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
phy->local_rx = e1000_1000t_rx_status_undefined;
phy->remote_rx = e1000_1000t_rx_status_undefined;
-out:
- return ret_val;
+ return 0;
}
/**
@@ -2154,7 +2121,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
s32 ret_val;
u32 ctrl;
- ret_val = e1000_check_reset_block(hw);
+ ret_val = phy->ops.check_reset_block(hw);
if (ret_val)
return 0;
@@ -2188,6 +2155,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
s32 e1000e_get_cfg_done(struct e1000_hw *hw)
{
mdelay(10);
+
return 0;
}
@@ -2369,7 +2337,6 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
**/
s32 e1000e_determine_phy_address(struct e1000_hw *hw)
{
- s32 ret_val = -E1000_ERR_PHY_TYPE;
u32 phy_addr = 0;
u32 i;
enum e1000_phy_type phy_type = e1000_phy_unknown;
@@ -2388,17 +2355,15 @@ s32 e1000e_determine_phy_address(struct e1000_hw *hw)
* If phy_type is valid, break - we found our
* PHY address
*/
- if (phy_type != e1000_phy_unknown) {
- ret_val = 0;
- goto out;
- }
+ if (phy_type != e1000_phy_unknown)
+ return 0;
+
usleep_range(1000, 2000);
i++;
} while (i < 10);
}
-out:
- return ret_val;
+ return -E1000_ERR_PHY_TYPE;
}
/**
@@ -2439,7 +2404,7 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
false, false);
- goto out;
+ goto release;
}
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
@@ -2464,13 +2429,13 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
(page << page_shift));
if (ret_val)
- goto out;
+ goto release;
}
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
-out:
+release:
hw->phy.ops.release(hw);
return ret_val;
}
@@ -2498,7 +2463,7 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
true, false);
- goto out;
+ goto release;
}
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
@@ -2523,12 +2488,12 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
(page << page_shift));
if (ret_val)
- goto out;
+ goto release;
}
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
-out:
+release:
hw->phy.ops.release(hw);
return ret_val;
}
@@ -2556,7 +2521,7 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
true, false);
- goto out;
+ goto release;
}
hw->phy.addr = 1;
@@ -2568,12 +2533,12 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
page);
if (ret_val)
- goto out;
+ goto release;
}
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
-out:
+release:
hw->phy.ops.release(hw);
return ret_val;
}
@@ -2600,7 +2565,7 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
false, false);
- goto out;
+ goto release;
}
hw->phy.addr = 1;
@@ -2611,13 +2576,13 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
page);
if (ret_val)
- goto out;
+ goto release;
}
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
-out:
+release:
hw->phy.ops.release(hw);
return ret_val;
}
@@ -2642,14 +2607,14 @@ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
if (ret_val) {
e_dbg("Could not set Port Control page\n");
- goto out;
+ return ret_val;
}
ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
if (ret_val) {
e_dbg("Could not read PHY register %d.%d\n",
BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
- goto out;
+ return ret_val;
}
/*
@@ -2664,15 +2629,14 @@ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
if (ret_val) {
e_dbg("Could not write PHY register %d.%d\n",
BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
- goto out;
+ return ret_val;
}
- /* Select Host Wakeup Registers page */
- ret_val = e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));
-
- /* caller now able to write registers on the Wakeup registers page */
-out:
- return ret_val;
+ /*
+ * Select Host Wakeup Registers page - caller now able to write
+ * registers on the Wakeup registers page
+ */
+ return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));
}
/**
@@ -2694,7 +2658,7 @@ s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
if (ret_val) {
e_dbg("Could not set Port Control page\n");
- goto out;
+ return ret_val;
}
/* Restore 769.17 to its original value */
@@ -2702,7 +2666,7 @@ s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
if (ret_val)
e_dbg("Could not restore PHY register %d.%d\n",
BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
-out:
+
return ret_val;
}
@@ -2750,7 +2714,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
if (ret_val) {
e_dbg("Could not enable PHY wakeup reg access\n");
- goto out;
+ return ret_val;
}
}
@@ -2760,7 +2724,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
if (ret_val) {
e_dbg("Could not write address opcode to page %d\n", page);
- goto out;
+ return ret_val;
}
if (read) {
@@ -2775,13 +2739,12 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
if (ret_val) {
e_dbg("Could not access PHY reg %d.%d\n", page, reg);
- goto out;
+ return ret_val;
}
if (!page_set)
ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
-out:
return ret_val;
}
@@ -3137,7 +3100,7 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
if (ret_val) {
e_dbg("Could not write the Address Offset port register\n");
- goto out;
+ return ret_val;
}
/* Read or write the data value next */
@@ -3146,12 +3109,9 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
else
ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);
- if (ret_val) {
+ if (ret_val)
e_dbg("Could not access the Data port register\n");
- goto out;
- }
-out:
return ret_val;
}
@@ -3172,39 +3132,34 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
u16 data;
if (hw->phy.type != e1000_phy_82578)
- goto out;
+ return 0;
/* Do not apply workaround if in PHY loopback bit 14 set */
e1e_rphy(hw, PHY_CONTROL, &data);
if (data & PHY_CONTROL_LB)
- goto out;
+ return 0;
/* check if link is up and at 1Gbps */
ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
if (ret_val)
- goto out;
+ return ret_val;
- data &= BM_CS_STATUS_LINK_UP |
- BM_CS_STATUS_RESOLVED |
- BM_CS_STATUS_SPEED_MASK;
+ data &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK;
- if (data != (BM_CS_STATUS_LINK_UP |
- BM_CS_STATUS_RESOLVED |
- BM_CS_STATUS_SPEED_1000))
- goto out;
+ if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_1000))
+ return 0;
- mdelay(200);
+ msleep(200);
/* flush the packets in the fifo buffer */
ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
HV_MUX_DATA_CTRL_FORCE_SPEED);
if (ret_val)
- goto out;
-
- ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
+ return ret_val;
-out:
- return ret_val;
+ return e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
}
/**
@@ -3246,39 +3201,32 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
if (ret_val)
- goto out;
+ return ret_val;
udelay(1);
if (phy->autoneg_wait_to_complete) {
e_dbg("Waiting for forced speed/duplex link on 82577 phy\n");
- ret_val = e1000e_phy_has_link_generic(hw,
- PHY_FORCE_LIMIT,
- 100000,
- &link);
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
if (ret_val)
- goto out;
+ return ret_val;
if (!link)
e_dbg("Link taking longer than expected.\n");
/* Try once more */
- ret_val = e1000e_phy_has_link_generic(hw,
- PHY_FORCE_LIMIT,
- 100000,
- &link);
- if (ret_val)
- goto out;
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
}
-out:
return ret_val;
}
@@ -3300,23 +3248,22 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
if (ret_val)
- goto out;
+ return ret_val;
if (!link) {
e_dbg("Phy info is only valid if link is up\n");
- ret_val = -E1000_ERR_CONFIG;
- goto out;
+ return -E1000_ERR_CONFIG;
}
phy->polarity_correction = true;
ret_val = e1000_check_polarity_82577(hw);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
if (ret_val)
- goto out;
+ return ret_val;
phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false;
@@ -3324,11 +3271,11 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
I82577_PHY_STATUS2_SPEED_1000MBPS) {
ret_val = hw->phy.ops.get_cable_length(hw);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
if (ret_val)
- goto out;
+ return ret_val;
phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
? e1000_1000t_rx_status_ok
@@ -3343,8 +3290,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
phy->remote_rx = e1000_1000t_rx_status_undefined;
}
-out:
- return ret_val;
+ return 0;
}
/**
@@ -3362,7 +3308,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
I82577_DSTATUS_CABLE_LENGTH_SHIFT;
@@ -3372,6 +3318,5 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
phy->cable_length = length;
-out:
- return ret_val;
+ return 0;
}
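
The bulk of the phy.c hunks above replace single-exit "goto out" error handling with direct returns in functions that have nothing to unwind. As a quick reference for the pattern — not part of the patch, and using hypothetical names (struct demo_hw, demo_read_old, demo_read_new) rather than anything from the driver — a minimal sketch:

struct demo_hw {
	/* hypothetical register-read hook, standing in for phy.ops.read_reg */
	int (*read_reg)(struct demo_hw *hw, unsigned int offset, unsigned short *val);
};

/* Before: label-based exit, even though nothing is cleaned up on error. */
static int demo_read_old(struct demo_hw *hw, unsigned short *val)
{
	int ret = 0;

	if (!hw->read_reg)
		goto out;

	ret = hw->read_reg(hw, 0, val);
out:
	return ret;
}

/* After: early returns, so the success path reads straight through. */
static int demo_read_new(struct demo_hw *hw, unsigned short *val)
{
	if (!hw->read_reg)
		return 0;

	return hw->read_reg(hw, 0, val);
}

The conversions above that now return 0 outright (for example when phy->ops.read_reg or hw->phy.ops.acquire is NULL) keep the original behaviour, since ret_val started at 0 on those paths.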
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index aed217449f0d..89eb1f85b9fa 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -134,6 +134,8 @@
#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */
+#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
/*
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index aa399a8a8f0d..e10821a0f249 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1577,7 +1577,9 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *rx_buffer_info;
struct igb_tx_buffer *tx_buffer_info;
+ struct netdev_queue *txq;
u16 rx_ntc, tx_ntc, count = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
/* initialize next to clean and descriptor values */
rx_ntc = rx_ring->next_to_clean;
@@ -1601,6 +1603,8 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
/* unmap buffer on tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
+ total_bytes += tx_buffer_info->bytecount;
+ total_packets += tx_buffer_info->gso_segs;
igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
/* increment rx/tx next to clean counters */
@@ -1615,6 +1619,9 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
}
+ txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+ netdev_tx_completed_queue(txq, total_packets, total_bytes);
+
/* re-map buffers to ring, store next to clean values */
igb_alloc_rx_buffers(rx_ring, count);
rx_ring->next_to_clean = rx_ntc;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 94be6c32fa7d..c4902411d749 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -173,7 +173,9 @@ static int igb_check_vf_assignment(struct igb_adapter *adapter);
#endif
#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
+#endif
static int igb_resume(struct device *);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
@@ -1767,10 +1769,21 @@ static int igb_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t changed = netdev->features ^ features;
+ struct igb_adapter *adapter = netdev_priv(netdev);
if (changed & NETIF_F_HW_VLAN_RX)
igb_vlan_mode(netdev, features);
+ if (!(changed & NETIF_F_RXALL))
+ return 0;
+
+ netdev->features = features;
+
+ if (netif_running(netdev))
+ igb_reinit_locked(adapter);
+ else
+ igb_reset(adapter);
+
return 0;
}
@@ -1952,6 +1965,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
/* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features;
+ netdev->hw_features |= NETIF_F_RXALL;
/* set this bit last since it cannot be part of hw_features */
netdev->features |= NETIF_F_HW_VLAN_FILTER;
@@ -1962,6 +1976,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
NETIF_F_IPV6_CSUM |
NETIF_F_SG;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
+
if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_HIGHDMA;
@@ -2750,6 +2766,8 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
wr32(E1000_TXDCTL(reg_idx), txdctl);
+
+ netdev_tx_reset_queue(txring_txq(ring));
}
/**
@@ -2999,6 +3017,22 @@ void igb_setup_rctl(struct igb_adapter *adapter)
wr32(E1000_QDE, ALL_QUEUES);
}
+ /* This is useful for sniffing bad packets. */
+ if (adapter->netdev->features & NETIF_F_RXALL) {
+ /* UPE and MPE will be handled by normal PROMISC logic
+ * in igb_set_rx_mode */
+ rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
+ E1000_RCTL_BAM | /* RX All Bcast Pkts */
+ E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
+
+ rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
+ E1000_RCTL_DPF | /* Allow filtered pause */
+ E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
+ /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
+ * and that breaks VLANs.
+ */
+ }
+
wr32(E1000_RCTL, rctl);
}
@@ -3242,7 +3276,6 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
buffer_info = &tx_ring->tx_buffer_info[i];
igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
}
- netdev_tx_reset_queue(txring_txq(tx_ring));
size = sizeof(struct igb_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_buffer_info, 0, size);
@@ -4290,6 +4323,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
/* write last descriptor with RS and EOP bits */
cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
+ if (unlikely(skb->no_fcs))
+ cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
tx_desc->read.cmd_type_len = cmd_type;
/* set the timestamp */
@@ -6095,8 +6130,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
goto next_desc;
}
- if (igb_test_staterr(rx_desc,
- E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
+ if (unlikely((igb_test_staterr(rx_desc,
+ E1000_RXDEXT_ERR_FRAME_ERR_MASK))
+ && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
dev_kfree_skb_any(skb);
goto next_desc;
}
@@ -6710,6 +6746,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
}
#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *dev)
{
int retval;
@@ -6729,6 +6766,7 @@ static int igb_suspend(struct device *dev)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
static int igb_resume(struct device *dev)
{
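
The igb hunks above also tie the loopback-test cleanup and Tx ring setup into byte-queue-limit accounting: netdev_tx_completed_queue() is called from igb_clean_test_rings(), and netdev_tx_reset_queue() moves from igb_clean_tx_ring() to igb_configure_tx_ring(). A minimal sketch of how these in-kernel BQL helpers normally pair up — the netdev_tx_*() calls are the real API, while the surrounding function and parameter names here are hypothetical:

#include <linux/netdevice.h>

static void demo_xmit_accounting(struct net_device *dev, unsigned int queue,
				 unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);

	/* On transmit: tell BQL how many bytes were handed to hardware. */
	netdev_tx_sent_queue(txq, bytes);
}

static void demo_clean_accounting(struct net_device *dev, unsigned int queue,
				  unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);

	/* On completion: report what the hardware finished sending. */
	netdev_tx_completed_queue(txq, pkts, bytes);
}

static void demo_ring_reset_accounting(struct net_device *dev, unsigned int queue)
{
	/* On ring reset/reconfiguration: clear the pending byte count. */
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, queue));
}

With the reset in igb_configure_tx_ring(), the pending-byte count is cleared whenever the queue is reprogrammed rather than only when buffers are freed.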
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index 33f40d3474ae..3e18045d8f89 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -97,10 +97,6 @@
#define E1000_ERR_MAC_INIT 5
#define E1000_ERR_MBX 15
-#ifndef ETH_ADDR_LEN
-#define ETH_ADDR_LEN 6
-#endif
-
/* SRRCTL bit definitions */
#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index db7dce2351c2..8ce67064b9c5 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -343,10 +343,10 @@ static int igbvf_get_coalesce(struct net_device *netdev,
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
- if (adapter->itr_setting <= 3)
- ec->rx_coalesce_usecs = adapter->itr_setting;
+ if (adapter->requested_itr <= 3)
+ ec->rx_coalesce_usecs = adapter->requested_itr;
else
- ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
+ ec->rx_coalesce_usecs = adapter->current_itr >> 2;
return 0;
}
@@ -365,15 +365,16 @@ static int igbvf_set_coalesce(struct net_device *netdev,
/* convert to rate of irq's per second */
if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
- adapter->itr = IGBVF_START_ITR;
- adapter->itr_setting = ec->rx_coalesce_usecs;
+ adapter->current_itr = IGBVF_START_ITR;
+ adapter->requested_itr = ec->rx_coalesce_usecs;
} else {
- adapter->itr = ec->rx_coalesce_usecs << 2;
- adapter->itr_setting = adapter->itr;
+ adapter->current_itr = ec->rx_coalesce_usecs << 2;
+ adapter->requested_itr = 1000000000 /
+ (adapter->current_itr * 256);
}
- writel(adapter->itr,
- hw->hw_addr + adapter->rx_ring[0].itr_register);
+ writel(adapter->current_itr,
+ hw->hw_addr + adapter->rx_ring->itr_register);
return 0;
}
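
A worked example of the unit conversion this hunk introduces, assuming the 256 ns EITR granularity implied by the new formula: an ITR register value of 488 corresponds to 488 * 256 ns, roughly 125 us between interrupts, i.e. 1,000,000,000 / (488 * 256) is about 8000 interrupts/sec — consistent with the "~8000 ints/sec" note added next to IGBVF_START_ITR in igbvf.h below. In the other direction, ec->rx_coalesce_usecs << 2 treats one microsecond as four register units (4 * 256 ns = 1.024 us), the same approximation the removed code already used.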
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 2c6d87e4d3d9..a895e2f7b34d 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -43,7 +43,18 @@ struct igbvf_info;
struct igbvf_adapter;
/* Interrupt defines */
-#define IGBVF_START_ITR 648 /* ~6000 ints/sec */
+#define IGBVF_START_ITR 488 /* ~8000 ints/sec */
+#define IGBVF_4K_ITR 980
+#define IGBVF_20K_ITR 196
+#define IGBVF_70K_ITR 56
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
/* Interrupt modes, as used by the IntMode parameter */
#define IGBVF_INT_MODE_LEGACY 0
@@ -155,6 +166,7 @@ struct igbvf_ring {
char name[IFNAMSIZ + 5];
u32 eims_value;
u32 itr_val;
+ enum latency_range itr_range;
u16 itr_register;
int set_itr;
@@ -187,10 +199,8 @@ struct igbvf_adapter {
unsigned long state;
/* Interrupt Throttle Rate */
- u32 itr;
- u32 itr_setting;
- u16 tx_itr;
- u16 rx_itr;
+ u32 requested_itr; /* ints/sec or adaptive */
+ u32 current_itr; /* Actual ITR register value, not ints/sec */
/*
* Tx
@@ -299,13 +309,6 @@ enum igbvf_state_t {
__IGBVF_DOWN
};
-enum latency_range {
- lowest_latency = 0,
- low_latency = 1,
- bulk_latency = 2,
- latency_invalid = 255
-};
-
extern char igbvf_driver_name[];
extern const char igbvf_driver_version[];
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 4e9141cfe81d..217c143686d2 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -632,14 +632,13 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
* traffic pattern. Constants in this function were computed
* based on theoretical maximum wire speed and thresholds were set based
* on testing data as well as attempting to minimize response time
- * while increasing bulk throughput. This functionality is controlled
- * by the InterruptThrottleRate module parameter.
+ * while increasing bulk throughput.
**/
-static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter,
- u16 itr_setting, int packets,
- int bytes)
+static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
+ enum latency_range itr_setting,
+ int packets, int bytes)
{
- unsigned int retval = itr_setting;
+ enum latency_range retval = itr_setting;
if (packets == 0)
goto update_itr_done;
@@ -675,65 +674,87 @@ static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter,
retval = low_latency;
}
break;
+ default:
+ break;
}
update_itr_done:
return retval;
}
-static void igbvf_set_itr(struct igbvf_adapter *adapter)
+static int igbvf_range_to_itr(enum latency_range current_range)
{
- struct e1000_hw *hw = &adapter->hw;
- u16 current_itr;
- u32 new_itr = adapter->itr;
-
- adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr,
- adapter->total_tx_packets,
- adapter->total_tx_bytes);
- /* conservative mode (itr 3) eliminates the lowest_latency setting */
- if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
- adapter->tx_itr = low_latency;
-
- adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr,
- adapter->total_rx_packets,
- adapter->total_rx_bytes);
- /* conservative mode (itr 3) eliminates the lowest_latency setting */
- if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
- adapter->rx_itr = low_latency;
-
- current_itr = max(adapter->rx_itr, adapter->tx_itr);
+ int new_itr;
- switch (current_itr) {
+ switch (current_range) {
/* counts and packets in update_itr are dependent on these numbers */
case lowest_latency:
- new_itr = 70000;
+ new_itr = IGBVF_70K_ITR;
break;
case low_latency:
- new_itr = 20000; /* aka hwitr = ~200 */
+ new_itr = IGBVF_20K_ITR;
break;
case bulk_latency:
- new_itr = 4000;
+ new_itr = IGBVF_4K_ITR;
break;
default:
+ new_itr = IGBVF_START_ITR;
break;
}
+ return new_itr;
+}
+
+static void igbvf_set_itr(struct igbvf_adapter *adapter)
+{
+ u32 new_itr;
+
+ adapter->tx_ring->itr_range =
+ igbvf_update_itr(adapter,
+ adapter->tx_ring->itr_val,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
+
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->requested_itr == 3 &&
+ adapter->tx_ring->itr_range == lowest_latency)
+ adapter->tx_ring->itr_range = low_latency;
+
+ new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);
+
- if (new_itr != adapter->itr) {
+ if (new_itr != adapter->tx_ring->itr_val) {
+ u32 current_itr = adapter->tx_ring->itr_val;
/*
* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is
* increasing
*/
- new_itr = new_itr > adapter->itr ?
- min(adapter->itr + (new_itr >> 2), new_itr) :
- new_itr;
- adapter->itr = new_itr;
- adapter->rx_ring->itr_val = 1952;
-
- if (adapter->msix_entries)
- adapter->rx_ring->set_itr = 1;
- else
- ew32(ITR, 1952);
+ new_itr = new_itr > current_itr ?
+ min(current_itr + (new_itr >> 2), new_itr) :
+ new_itr;
+ adapter->tx_ring->itr_val = new_itr;
+
+ adapter->tx_ring->set_itr = 1;
+ }
+
+ adapter->rx_ring->itr_range =
+ igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
+ if (adapter->requested_itr == 3 &&
+ adapter->rx_ring->itr_range == lowest_latency)
+ adapter->rx_ring->itr_range = low_latency;
+
+ new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);
+
+ if (new_itr != adapter->rx_ring->itr_val) {
+ u32 current_itr = adapter->rx_ring->itr_val;
+ new_itr = new_itr > current_itr ?
+ min(current_itr + (new_itr >> 2), new_itr) :
+ new_itr;
+ adapter->rx_ring->itr_val = new_itr;
+
+ adapter->rx_ring->set_itr = 1;
}
}
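
To make the ramping in this hunk concrete (a worked example, not taken from the patch): if a ring currently sits at IGBVF_20K_ITR (196) and igbvf_range_to_itr() asks for IGBVF_4K_ITR (980), the min(current_itr + (new_itr >> 2), new_itr) step first writes min(196 + 245, 980) = 441, then 686, 931 and finally 980 on later passes, so the interrupt rate only decays gradually toward the bulk setting. A move in the opposite direction — toward a smaller register value, i.e. more interrupts — is applied in a single step.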
@@ -835,6 +856,11 @@ static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
struct e1000_hw *hw = &adapter->hw;
struct igbvf_ring *tx_ring = adapter->tx_ring;
+ if (tx_ring->set_itr) {
+ writel(tx_ring->itr_val,
+ adapter->hw.hw_addr + tx_ring->itr_register);
+ adapter->tx_ring->set_itr = 0;
+ }
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@@ -937,19 +963,10 @@ static void igbvf_configure_msix(struct igbvf_adapter *adapter)
igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
adapter->eims_enable_mask |= tx_ring->eims_value;
- if (tx_ring->itr_val)
- writel(tx_ring->itr_val,
- hw->hw_addr + tx_ring->itr_register);
- else
- writel(1952, hw->hw_addr + tx_ring->itr_register);
-
+ writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
adapter->eims_enable_mask |= rx_ring->eims_value;
- if (rx_ring->itr_val)
- writel(rx_ring->itr_val,
- hw->hw_addr + rx_ring->itr_register);
- else
- writel(1952, hw->hw_addr + rx_ring->itr_register);
+ writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);
/* set vector for other causes, i.e. link changes */
@@ -1027,7 +1044,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
goto out;
adapter->tx_ring->itr_register = E1000_EITR(vector);
- adapter->tx_ring->itr_val = 1952;
+ adapter->tx_ring->itr_val = adapter->current_itr;
vector++;
err = request_irq(adapter->msix_entries[vector].vector,
@@ -1037,7 +1054,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
goto out;
adapter->rx_ring->itr_register = E1000_EITR(vector);
- adapter->rx_ring->itr_val = 1952;
+ adapter->rx_ring->itr_val = adapter->current_itr;
vector++;
err = request_irq(adapter->msix_entries[vector].vector,
@@ -1151,7 +1168,7 @@ static int igbvf_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(napi);
- if (adapter->itr_setting & 3)
+ if (adapter->requested_itr & 3)
igbvf_set_itr(adapter);
if (!test_bit(__IGBVF_DOWN, &adapter->state))
@@ -1521,8 +1538,8 @@ static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
adapter->tx_abs_int_delay = 32;
adapter->rx_int_delay = 0;
adapter->rx_abs_int_delay = 8;
- adapter->itr_setting = 3;
- adapter->itr = 20000;
+ adapter->requested_itr = 3;
+ adapter->current_itr = IGBVF_START_ITR;
/* Set various function pointers */
adapter->ei->init_ops(&adapter->hw);
@@ -1695,6 +1712,7 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0;
}
@@ -2695,18 +2713,19 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
dev_info(&pdev->dev,
"PF still in reset state, assigning new address."
" Is the PF interface up?\n");
- dev_hw_addr_random(adapter->netdev, hw->mac.addr);
+ eth_hw_addr_random(netdev);
+ memcpy(adapter->hw.mac.addr, netdev->dev_addr,
+ netdev->addr_len);
} else {
err = hw->mac.ops.read_mac_addr(hw);
if (err) {
dev_err(&pdev->dev, "Error reading MAC address\n");
goto err_hw_init;
}
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
}
- memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
-
if (!is_valid_ether_addr(netdev->perm_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
netdev->dev_addr);
@@ -2714,6 +2733,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
goto err_hw_init;
}
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
(unsigned long) adapter);
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index 19551977b352..30a6cc426037 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -246,7 +246,7 @@ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
for (i = 0; i < cnt; i++) {
hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list);
hash_list[i] = hash_value & 0x0FFFF;
- mc_addr_list += ETH_ADDR_LEN;
+ mc_addr_list += ETH_ALEN;
}
mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE);
@@ -333,10 +333,7 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
**/
static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw)
{
- int i;
-
- for (i = 0; i < ETH_ADDR_LEN; i++)
- hw->mac.addr[i] = hw->mac.perm_addr[i];
+ memcpy(hw->mac.addr, hw->mac.perm_addr, ETH_ALEN);
return E1000_SUCCESS;
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index cb23448fe2fa..4d2ae97ff1b3 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -75,18 +75,6 @@ struct ixgb_adapter;
#include "ixgb_ee.h"
#include "ixgb_ids.h"
-#define PFX "ixgb: "
-
-#ifdef _DEBUG_DRIVER_
-#define IXGB_DBG(fmt, args...) printk(KERN_DEBUG PFX fmt, ##args)
-#else
-#define IXGB_DBG(fmt, args...) \
-do { \
- if (0) \
- printk(KERN_DEBUG PFX fmt, ##args); \
-} while (0)
-#endif
-
/* TX/RX descriptor defines */
#define DEFAULT_TXD 256
#define MAX_TXD 4096
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
index 2ed925f38811..eca216b9b859 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
@@ -533,10 +533,8 @@ __le16
ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index)
{
- if ((index < IXGB_EEPROM_SIZE) &&
- (ixgb_check_and_get_eeprom_data(hw) == true)) {
- return hw->eeprom[index];
- }
+ if (index < IXGB_EEPROM_SIZE && ixgb_check_and_get_eeprom_data(hw))
+ return hw->eeprom[index];
return 0;
}
@@ -558,7 +556,7 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
ENTER();
- if (ixgb_check_and_get_eeprom_data(hw) == true) {
+ if (ixgb_check_and_get_eeprom_data(hw)) {
for (i = 0; i < ETH_ALEN; i++) {
mac_addr[i] = ee_map->mac_addr[i];
}
@@ -578,7 +576,7 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
u32
ixgb_get_ee_pba_number(struct ixgb_hw *hw)
{
- if (ixgb_check_and_get_eeprom_data(hw) == true)
+ if (ixgb_check_and_get_eeprom_data(hw))
return le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
| (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16);
@@ -599,7 +597,7 @@ ixgb_get_ee_device_id(struct ixgb_hw *hw)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
- if (ixgb_check_and_get_eeprom_data(hw) == true)
+ if (ixgb_check_and_get_eeprom_data(hw))
return le16_to_cpu(ee_map->device_id);
return 0;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 9bd5faf64a85..82aaa792cbf3 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1136,10 +1136,8 @@ ixgb_set_multi(struct net_device *netdev)
u8 *mta = kmalloc(IXGB_MAX_NUM_MULTICAST_ADDRESSES *
ETH_ALEN, GFP_ATOMIC);
u8 *addr;
- if (!mta) {
- pr_err("allocation of multicast memory failed\n");
+ if (!mta)
goto alloc_failed;
- }
IXGB_WRITE_REG(hw, RCTL, rctl);
@@ -2070,8 +2068,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
/* All receives must fit into a single buffer */
- IXGB_DBG("Receive packet consumed multiple buffers "
- "length<%x>\n", length);
+ pr_debug("Receive packet consumed multiple buffers length<%x>\n",
+ length);
dev_kfree_skb_irq(skb);
goto rxdesc_done;
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 7a16177a12a5..8be1d1b2132e 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index e6aeb64105a4..80e26ff30ebf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -72,12 +72,6 @@
/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
-#define IXGBE_RXBUFFER_2K 2048
-#define IXGBE_RXBUFFER_3K 3072
-#define IXGBE_RXBUFFER_4K 4096
-#define IXGBE_RXBUFFER_7K 7168
-#define IXGBE_RXBUFFER_8K 8192
-#define IXGBE_RXBUFFER_15K 15360
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
/*
@@ -102,14 +96,11 @@
#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
-#define IXGBE_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 8)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
-#define IXGBE_MAX_RSC_INT_RATE 162760
-
#define IXGBE_MAX_VF_MC_ENTRIES 30
#define IXGBE_MAX_VF_FUNCTIONS 64
#define IXGBE_MAX_VFTA_ENTRIES 128
@@ -156,19 +147,19 @@ struct vf_macvlans {
struct ixgbe_tx_buffer {
union ixgbe_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
- dma_addr_t dma;
- u32 length;
- u32 tx_flags;
struct sk_buff *skb;
- u32 bytecount;
- u16 gso_segs;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ __be16 protocol;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
};
struct ixgbe_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
- dma_addr_t page_dma;
unsigned int page_offset;
};
@@ -180,7 +171,6 @@ struct ixgbe_queue_stats {
struct ixgbe_tx_queue_stats {
u64 restart_queue;
u64 tx_busy;
- u64 completed;
u64 tx_done_old;
};
@@ -190,22 +180,18 @@ struct ixgbe_rx_queue_stats {
u64 non_eop_descs;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
+ u64 csum_err;
};
-enum ixbge_ring_state_t {
+enum ixgbe_ring_state_t {
__IXGBE_TX_FDIR_INIT_DONE,
__IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED,
- __IXGBE_RX_PS_ENABLED,
__IXGBE_RX_RSC_ENABLED,
+ __IXGBE_RX_CSUM_UDP_ZERO_ERR,
+ __IXGBE_RX_FCOE_BUFSZ,
};
-#define ring_is_ps_enabled(ring) \
- test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
- set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
- clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
#define check_for_tx_hang(ring) \
test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
@@ -220,18 +206,20 @@ enum ixbge_ring_state_t {
clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
struct ixgbe_ring {
struct ixgbe_ring *next; /* pointer to next ring in q_vector */
+ struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
+ struct net_device *netdev; /* netdev ring belongs to */
+ struct device *dev; /* device for DMA mapping */
void *desc; /* descriptor ring memory */
- struct device *dev; /* device for DMA mapping */
- struct net_device *netdev; /* netdev ring belongs to */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
struct ixgbe_rx_buffer *rx_buffer_info;
};
unsigned long state;
u8 __iomem *tail;
+ dma_addr_t dma; /* phys. address of descriptor ring */
+ unsigned int size; /* length in bytes */
u16 count; /* amount of descriptors */
- u16 rx_buf_len;
u8 queue_index; /* needed for multiqueue queue management */
u8 reg_idx; /* holds the special value that gets
@@ -239,12 +227,17 @@ struct ixgbe_ring {
* associated with this ring, which is
* different for DCB and RSS modes
*/
- u8 atr_sample_rate;
- u8 atr_count;
-
u16 next_to_use;
u16 next_to_clean;
+ union {
+ u16 next_to_alloc;
+ struct {
+ u8 atr_sample_rate;
+ u8 atr_count;
+ };
+ };
+
u8 dcb_tc;
struct ixgbe_queue_stats stats;
struct u64_stats_sync syncp;
@@ -252,11 +245,6 @@ struct ixgbe_ring {
struct ixgbe_tx_queue_stats tx_stats;
struct ixgbe_rx_queue_stats rx_stats;
};
- int numa_node;
- unsigned int size; /* length in bytes */
- dma_addr_t dma; /* phys. address of descriptor ring */
- struct rcu_head rcu;
- struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
@@ -287,6 +275,22 @@ struct ixgbe_ring_feature {
int mask;
} ____cacheline_internodealigned_in_smp;
+/*
+ * FCoE requires that all Rx buffers be over 2200 bytes in length. Since
+ * this is twice the size of a half page we need to double the page order
+ * for FCoE enabled Rx queues.
+ */
+#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
+static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+{
+ return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0;
+}
+#else
+#define ixgbe_rx_pg_order(_ring) 0
+#endif
+#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
+#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
+
struct ixgbe_ring_container {
struct ixgbe_ring *ring; /* pointer to linked list of rings */
unsigned int total_bytes; /* total bytes processed this int */
@@ -296,6 +300,10 @@ struct ixgbe_ring_container {
u8 itr; /* current ITR setting for ring */
};
+/* iterator for handling rings in ring container */
+#define ixgbe_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
+
#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
@@ -315,8 +323,13 @@ struct ixgbe_q_vector {
struct ixgbe_ring_container rx, tx;
struct napi_struct napi;
- cpumask_var_t affinity_mask;
+ cpumask_t affinity_mask;
+ int numa_node;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
/*
@@ -329,6 +342,13 @@ struct ixgbe_q_vector {
#define IXGBE_10K_ITR 400
#define IXGBE_8K_ITR 500
+/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
+static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
+ const u32 stat_err_bits)
+{
+ return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
+
static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
u16 ntc = ring->next_to_clean;
@@ -337,11 +357,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
-#define IXGBE_RX_DESC_ADV(R, i) \
+#define IXGBE_RX_DESC(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
-#define IXGBE_TX_DESC_ADV(R, i) \
+#define IXGBE_TX_DESC(R, i) \
(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
-#define IXGBE_TX_CTXTDESC_ADV(R, i) \
+#define IXGBE_TX_CTXTDESC(R, i) \
(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
@@ -361,18 +381,25 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
-#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+/* default to trying for four seconds */
+#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
+
/* board specific private data structure */
struct ixgbe_adapter {
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
unsigned long state;
/* Some features need tri-state capability,
* thus the additional *_CAPABLE flags.
*/
u32 flags;
-#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
@@ -409,60 +436,52 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5)
#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
+#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
+#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u16 bd_number;
- struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
-
- /* DCB parameters */
- struct ieee_pfc *ixgbe_ieee_pfc;
- struct ieee_ets *ixgbe_ieee_ets;
- struct ixgbe_dcb_config dcb_cfg;
- struct ixgbe_dcb_config temp_dcb_cfg;
- u8 dcb_set_bitmap;
- u8 dcbx_cap;
- enum ixgbe_fc_mode last_lfc_mode;
-
- /* Interrupt Throttle Rate */
- u32 rx_itr_setting;
- u32 tx_itr_setting;
- u16 eitr_low;
- u16 eitr_high;
-
- /* Work limits */
+ /* Tx fast path data */
+ int num_tx_queues;
+ u16 tx_itr_setting;
u16 tx_work_limit;
+ /* Rx fast path data */
+ int num_rx_queues;
+ u16 rx_itr_setting;
+
/* TX */
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
- int num_tx_queues;
- u32 tx_timeout_count;
- bool detect_tx_hung;
u64 restart_queue;
u64 lsc_int;
+ u32 tx_timeout_count;
/* RX */
- struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp;
- int num_rx_queues;
+ struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
int num_rx_pools; /* == num_rx_queues in 82598 */
int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
+ u64 rsc_total_count;
+ u64 rsc_total_flush;
u64 non_eop_descs;
- int num_msix_vectors;
- int max_msix_q_vectors; /* true count of q_vectors for device */
- struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
- struct msix_entry *msix_entries;
-
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
-/* default to trying for four seconds */
-#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
+ struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
- /* OS defined structs */
- struct net_device *netdev;
- struct pci_dev *pdev;
+ /* DCB parameters */
+ struct ieee_pfc *ixgbe_ieee_pfc;
+ struct ieee_ets *ixgbe_ieee_ets;
+ struct ixgbe_dcb_config dcb_cfg;
+ struct ixgbe_dcb_config temp_dcb_cfg;
+ u8 dcb_set_bitmap;
+ u8 dcbx_cap;
+ enum ixgbe_fc_mode last_lfc_mode;
+
+ int num_msix_vectors;
+ int max_msix_q_vectors; /* true count of q_vectors for device */
+ struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
+ struct msix_entry *msix_entries;
u32 test_icr;
struct ixgbe_ring test_tx_ring;
@@ -473,10 +492,6 @@ struct ixgbe_adapter {
u16 msg_enable;
struct ixgbe_hw_stats stats;
- /* Interrupt Throttle Rate */
- u32 rx_eitr_param;
- u32 tx_eitr_param;
-
u64 tx_busy;
unsigned int tx_ring_count;
unsigned int rx_ring_count;
@@ -485,25 +500,30 @@ struct ixgbe_adapter {
bool link_up;
unsigned long link_check_timeout;
- struct work_struct service_task;
struct timer_list service_timer;
+ struct work_struct service_task;
+
+ struct hlist_head fdir_filter_list;
+ unsigned long fdir_overflow; /* number of times ATR was backed off */
+ union ixgbe_atr_input fdir_mask;
+ int fdir_filter_count;
u32 fdir_pballoc;
u32 atr_sample_rate;
- unsigned long fdir_overflow; /* number of times ATR was backed off */
spinlock_t fdir_perfect_lock;
+
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
- u64 rsc_total_count;
- u64 rsc_total_flush;
u32 wol;
+
+ u16 bd_number;
+
u16 eeprom_verh;
u16 eeprom_verl;
u16 eeprom_cap;
- int node;
- u32 led_reg;
u32 interrupt_event;
+ u32 led_reg;
/* SR-IOV */
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -513,9 +533,6 @@ struct ixgbe_adapter {
struct vf_macvlans vf_mvs;
struct vf_macvlans *mv_list;
- struct hlist_head fdir_filter_list;
- union ixgbe_atr_input fdir_mask;
- int fdir_filter_count;
u32 timer_event_accumulator;
u32 vferr_refcount;
};
@@ -535,12 +552,16 @@ enum ixbge_state_t {
__IXGBE_IN_SFP_INIT,
};
-struct ixgbe_rsc_cb {
+struct ixgbe_cb {
+ union { /* Union defining head/tail partner */
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ };
dma_addr_t dma;
- u16 skb_cnt;
- bool delay_unmap;
+ u16 append_cnt;
+ bool page_released;
};
-#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
enum ixgbe_boards {
board_82598,
@@ -560,7 +581,9 @@ extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];
+#ifdef IXGBE_FCOE
extern char ixgbe_default_device_descr[];
+#endif /* IXGBE_FCOE */
extern void ixgbe_up(struct ixgbe_adapter *adapter);
extern void ixgbe_down(struct ixgbe_adapter *adapter);
@@ -585,6 +608,7 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
struct ixgbe_tx_buffer *);
extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
+extern int ixgbe_poll(struct napi_struct *napi, int budget);
extern int ethtool_ioctl(struct ifreq *ifr);
extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
@@ -604,18 +628,20 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
+#ifdef CONFIG_IXGBE_DCB
extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+#endif
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, u8 *hdr_len);
+extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *first,
+ u8 *hdr_len);
extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb,
- u32 staterr);
+ struct sk_buff *skb);
extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc);
extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
@@ -632,4 +658,9 @@ extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
struct netdev_fcoe_hbainfo *info);
#endif /* IXGBE_FCOE */
+static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
+
#endif /* _IXGBE_H_ */
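
A quick worked example of the new buffer-sizing helpers above, assuming 4 KiB pages: a normal Rx ring uses page order 0, so ixgbe_rx_pg_size() is 4096 and ixgbe_rx_bufsz() is 2048 bytes per buffer; once __IXGBE_RX_FCOE_BUFSZ is set, ixgbe_rx_pg_order() returns 1, doubling those to 8192 and 4096 so each buffer clears the 2200-byte FCoE minimum called out in the comment. On architectures where PAGE_SIZE is already 8192 or larger, the #else branch keeps order 0, since half a page is already big enough.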
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index b406c367b190..85d2e2c4ce4a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -213,15 +213,15 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
for (i = 0; ((i < hw->mac.max_tx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
- regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
}
for (i = 0; ((i < hw->mac.max_rx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
@@ -617,7 +617,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
*link_up = false;
}
- if (*link_up == false)
+ if (!*link_up)
goto out;
}
@@ -645,7 +645,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
else
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
+ if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up &&
(ixgbe_validate_link_ready(hw) != 0))
*link_up = false;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 4e59083a3de2..9c14685358eb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -779,7 +779,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
/* Check to see if speed passed in is supported. */
- hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg);
+ status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
+ &autoneg);
if (status != 0)
goto out;
@@ -1906,38 +1907,17 @@ out:
**/
static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
-#define IXGBE_MAX_SECRX_POLL 30
- int i;
- int secrxreg;
-
/*
* Workaround for 82599 silicon errata when enabling the Rx datapath.
* If traffic is incoming before we enable the Rx unit, it could hang
* the Rx DMA unit. Therefore, make sure the security engine is
* completely disabled prior to enabling the Rx unit.
*/
- secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
- secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
- IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
- for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
- secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
- if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
- break;
- else
- /* Use interrupt-safe sleep just in case */
- udelay(10);
- }
-
- /* For informational purposes only */
- if (i >= IXGBE_MAX_SECRX_POLL)
- hw_dbg(hw, "Rx unit being enabled before security "
- "path fully disabled. Continuing with init.\n");
+ hw->mac.ops.disable_rx_buff(hw);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
- secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
- secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
- IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
- IXGBE_WRITE_FLUSH(hw);
+
+ hw->mac.ops.enable_rx_buff(hw);
return 0;
}
@@ -2102,6 +2082,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.get_media_type = &ixgbe_get_media_type_82599,
.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
.enable_rx_dma = &ixgbe_enable_rx_dma_82599,
+ .disable_rx_buff = &ixgbe_disable_rx_buff_generic,
+ .enable_rx_buff = &ixgbe_enable_rx_buff_generic,
.get_mac_addr = &ixgbe_get_mac_addr_generic,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
.get_device_caps = &ixgbe_get_device_caps_generic,
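
With the hunks above, the 82599 Rx-enable path no longer open-codes the SECRXCTRL poll; it goes through the new disable_rx_buff/enable_rx_buff mac operations. A hedged sketch of the resulting call pattern for code that has to touch Rx datapath registers (the RXCTRL manipulation here is illustrative, not from this patch):

	u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);

	/* quiesce the Rx security block first (82599 errata workaround) */
	hw->mac.ops.disable_rx_buff(hw);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl | IXGBE_RXCTRL_RXEN);
	hw->mac.ops.enable_rx_buff(hw);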
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 383b9413292e..49aa41fe7b84 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -128,14 +128,14 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
/* Disable relaxed ordering */
for (i = 0; i < hw->mac.max_tx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
- regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
}
for (i = 0; i < hw->mac.max_rx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
@@ -2011,13 +2011,20 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- fcrth = hw->fc.high_water[packetbuf_num] << 10;
fcrtl = hw->fc.low_water << 10;
if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+ fcrth = hw->fc.high_water[packetbuf_num] << 10;
fcrth |= IXGBE_FCRTH_FCEN;
if (hw->fc.send_xon)
fcrtl |= IXGBE_FCRTL_XONE;
+ } else {
+ /*
+ * If Tx flow control is disabled, set our high water mark
+ * to Rx FIFO size minus 32 in order to prevent Tx switch
+ * loopback from stalling on DMA.
+ */
+ fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)) - 32;
}
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
@@ -2578,6 +2585,58 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
}
/**
+ * ixgbe_disable_rx_buff_generic - Stops the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the receive data path and waits for the HW to internally
+ * empty the Rx security block.
+ **/
+s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECRX_POLL 40
+ int i;
+ int secrxreg;
+
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
+ if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
+ break;
+ else
+ /* Use interrupt-safe sleep just in case */
+ udelay(10);
+ }
+
+ /* For informational purposes only */
+ if (i >= IXGBE_MAX_SECRX_POLL)
+ hw_dbg(hw, "Rx unit being enabled before security "
+ "path fully disabled. Continuing with init.\n");
+
+ return 0;
+
+}
+
+/**
+ * ixgbe_enable_rx_buff_generic - Enables the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the receive data path
+ **/
+s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
+{
+ int secrxreg;
+
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
* ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
* @hw: pointer to hardware structure
* @regval: register value to write to RXCTRL
@@ -3336,7 +3395,7 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* @hw: pointer to the HW structure
* @buffer: contains the command to write and where the return status will
* be placed
- * @lenght: lenght of buffer, must be multiple of 4 bytes
+ * @length: length of buffer, must be multiple of 4 bytes
*
* Communicates with the manageability block. On success return 0
* else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 2c834c46bba1..204f06235b45 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -74,6 +74,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 79a92fe987b9..dde65f951400 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -111,7 +111,7 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
- u8 err = 0;
+ int err = 0;
u8 prio_tc[MAX_USER_PRIORITY] = {0};
int i;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -122,7 +122,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
/* verify there is something to do, if not then exit */
if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- return err;
+ goto out;
if (state > 0) {
err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
@@ -131,10 +131,14 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
err = ixgbe_setup_tc(netdev, 0);
}
+ if (err)
+ goto out;
+
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
- return err;
+out:
+ return err ? 1 : 0;
}
static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
@@ -486,7 +490,7 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
return 0;
}
-static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
u8 rval = 0;
@@ -510,7 +514,7 @@ static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
return rval;
}
-static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
{
return -EINVAL;
}
@@ -581,7 +585,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
- int i;
+ int i, err = 0;
__u8 max_tc = 0;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
@@ -608,12 +612,17 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
return -EINVAL;
if (max_tc != netdev_get_num_tc(dev))
- ixgbe_setup_tc(dev, max_tc);
+ err = ixgbe_setup_tc(dev, max_tc);
+
+ if (err)
+ goto err_out;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]);
- return ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
+ err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
+err_out:
+ return err;
}
static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
@@ -726,6 +735,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ieee_ets ets = {0};
struct ieee_pfc pfc = {0};
+ int err = 0;
/* no support for LLD_MANAGED modes or CEE+IEEE */
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
@@ -756,10 +766,10 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
*/
ixgbe_dcbnl_ieee_setets(dev, &ets);
ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
- ixgbe_setup_tc(dev, 0);
+ err = ixgbe_setup_tc(dev, 0);
}
- return 0;
+ return err ? 1 : 0;
}
const struct dcbnl_rtnl_ops dcbnl_ops = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index a62975480e37..31a2bf76a346 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -35,6 +35,7 @@
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
+#include <linux/highmem.h>
#include <linux/uaccess.h>
#include "ixgbe.h"
@@ -935,12 +936,12 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD);
- new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD);
+ new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD);
+ new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD);
new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
- new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD);
- new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
+ new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
+ new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
if ((new_tx_count == adapter->tx_ring[0]->count) &&
@@ -1591,7 +1592,6 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
tx_ring->dev = &adapter->pdev->dev;
tx_ring->netdev = adapter->netdev;
tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
- tx_ring->numa_node = adapter->node;
err = ixgbe_setup_tx_resources(tx_ring);
if (err)
@@ -1616,8 +1616,6 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
rx_ring->dev = &adapter->pdev->dev;
rx_ring->netdev = adapter->netdev;
rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
- rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K;
- rx_ring->numa_node = adapter->node;
err = ixgbe_setup_rx_resources(rx_ring);
if (err) {
@@ -1703,63 +1701,72 @@ static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
}
static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
- unsigned int frame_size)
+ unsigned int frame_size)
{
memset(skb->data, 0xFF, frame_size);
- frame_size &= ~1;
- memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
- memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
- memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+ frame_size >>= 1;
+ memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size + 10], 0xBE, 1);
+ memset(&skb->data[frame_size + 12], 0xAF, 1);
}
-static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
- unsigned int frame_size)
+static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
+ unsigned int frame_size)
{
- frame_size &= ~1;
- if (*(skb->data + 3) == 0xFF) {
- if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
- (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
- return 0;
- }
- }
- return 13;
+ unsigned char *data;
+ bool match = true;
+
+ frame_size >>= 1;
+
+ data = kmap(rx_buffer->page) + rx_buffer->page_offset;
+
+ if (data[3] != 0xFF ||
+ data[frame_size + 10] != 0xBE ||
+ data[frame_size + 12] != 0xAF)
+ match = false;
+
+ kunmap(rx_buffer->page);
+
+ return match;
}
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
- struct ixgbe_ring *tx_ring,
- unsigned int size)
+ struct ixgbe_ring *tx_ring,
+ unsigned int size)
{
union ixgbe_adv_rx_desc *rx_desc;
- struct ixgbe_rx_buffer *rx_buffer_info;
- struct ixgbe_tx_buffer *tx_buffer_info;
- const int bufsz = rx_ring->rx_buf_len;
- u32 staterr;
+ struct ixgbe_rx_buffer *rx_buffer;
+ struct ixgbe_tx_buffer *tx_buffer;
u16 rx_ntc, tx_ntc, count = 0;
/* initialize next to clean and descriptor values */
rx_ntc = rx_ring->next_to_clean;
tx_ntc = tx_ring->next_to_clean;
- rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
- while (staterr & IXGBE_RXD_STAT_DD) {
+ while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
/* check Rx buffer */
- rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
- /* unmap Rx buffer, will be remapped by alloc_rx_buffers */
- dma_unmap_single(rx_ring->dev,
- rx_buffer_info->dma,
- bufsz,
- DMA_FROM_DEVICE);
- rx_buffer_info->dma = 0;
+ /* sync Rx buffer for CPU read */
+ dma_sync_single_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ ixgbe_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
/* verify contents of skb */
- if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size))
+ if (ixgbe_check_lbtest_frame(rx_buffer, size))
count++;
+ /* sync Rx buffer for device write */
+ dma_sync_single_for_device(rx_ring->dev,
+ rx_buffer->dma,
+ ixgbe_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
+
/* unmap buffer on Tx side */
- tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
- ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+ tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
/* increment Rx/Tx next to clean counters */
rx_ntc++;
@@ -1770,8 +1777,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
tx_ntc = 0;
/* fetch next descriptor */
- rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
}
/* re-map buffers to ring, store next to clean values */
@@ -2108,8 +2114,6 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
-
/* only valid if in constant ITR mode */
if (adapter->rx_itr_setting <= 1)
ec->rx_coalesce_usecs = adapter->rx_itr_setting;
@@ -2133,31 +2137,29 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
* this function must be called before setting the new value of
* rx_itr_setting
*/
-static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
- struct ethtool_coalesce *ec)
+static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
+ /* nothing to do if LRO or RSC are not enabled */
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
+ !(netdev->features & NETIF_F_LRO))
return false;
- /* if interrupt rate is too high then disable RSC */
- if (ec->rx_coalesce_usecs != 1 &&
- ec->rx_coalesce_usecs <= (IXGBE_MIN_RSC_ITR >> 2)) {
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- e_info(probe, "rx-usecs set too low, disabling RSC\n");
- adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
- return true;
- }
- } else {
- /* check the feature flag value and enable RSC if necessary */
- if ((netdev->features & NETIF_F_LRO) &&
- !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
- e_info(probe, "rx-usecs set to %d, re-enabling RSC\n",
- ec->rx_coalesce_usecs);
+ /* check the feature flag value and enable RSC if necessary */
+ if (adapter->rx_itr_setting == 1 ||
+ adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ e_info(probe, "rx-usecs value high enough "
+ "to re-enable RSC\n");
return true;
}
+ /* if interrupt rate is too high then disable RSC */
+ } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+ e_info(probe, "rx-usecs set too low, disabling RSC\n");
+ return true;
}
return false;
}
@@ -2177,16 +2179,10 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
&& ec->tx_coalesce_usecs)
return -EINVAL;
- if (ec->tx_max_coalesced_frames_irq)
- adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
-
if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
(ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
return -EINVAL;
- /* check the old value and enable RSC if necessary */
- need_reset = ixgbe_update_rsc(adapter, ec);
-
if (ec->rx_coalesce_usecs > 1)
adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
else
@@ -2207,6 +2203,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
else
tx_itr_param = adapter->tx_itr_setting;
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_update_rsc(adapter);
+
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
else
@@ -2214,7 +2213,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
for (i = 0; i < num_vectors; i++) {
q_vector = adapter->q_vector[i];
- q_vector->tx.work_limit = adapter->tx_work_limit;
if (q_vector->tx.count && !q_vector->rx.count)
/* tx only */
q_vector->itr = tx_itr_param;
@@ -2328,6 +2326,48 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
return 0;
}
+static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* if RSS is disabled then report no hashing */
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ return 0;
+
+ /* Report default options for RSS on ixgbe */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case UDP_V4_FLOW:
+ if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case UDP_V6_FLOW:
+ if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -2349,6 +2389,9 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
break;
+ case ETHTOOL_GRXFH:
+ ret = ixgbe_get_rss_hash_opts(adapter, cmd);
+ break;
default:
break;
}
@@ -2583,6 +2626,111 @@ static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
return err;
}
+#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
+ IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
+static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ u32 flags2 = adapter->flags2;
+
+ /*
+ * RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ !(nfc->data & RXH_L4_B_0_1) ||
+ !(nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ case UDP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ (nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* if we changed something we need to update flags */
+ if (flags2 != adapter->flags2) {
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+
+ if ((flags2 & UDP_RSS_FLAGS) &&
+ !(adapter->flags2 & UDP_RSS_FLAGS))
+ e_warn(drv, "enabling UDP RSS: fragmented packets"
+ " may arrive out of order to the stack above\n");
+
+ adapter->flags2 = flags2;
+
+ /* Perform hash on these packet types */
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
+ | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+ | IXGBE_MRQC_RSS_FIELD_IPV6
+ | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+
+ mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
+ IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
+
+ if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
+
+ if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ }
+
+ return 0;
+}
+
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -2595,6 +2743,9 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
break;
+ case ETHTOOL_SRXFH:
+ ret = ixgbe_set_rss_hash_opt(adapter, cmd);
+ break;
default:
break;
}
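
The ETHTOOL_GRXFH/SRXFH handlers added above let userspace query and tune which header fields feed the RSS hash, including opting UDP ports in (which triggers the out-of-order warning for fragmented UDP seen earlier in the hunk). From userspace this is reached through the SIOCETHTOOL ioctl; with a new enough ethtool binary it is roughly "ethtool -N ethX rx-flow-hash udp4 sdfn". A hedged standalone sketch of the raw ioctl (interface name "eth0" is an assumption, error handling is minimal):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_rxnfc nfc;
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;

		/* hash UDP/IPv4 on src/dst IP and src/dst port */
		memset(&nfc, 0, sizeof(nfc));
		nfc.cmd = ETHTOOL_SRXFH;
		nfc.flow_type = UDP_V4_FLOW;
		nfc.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&nfc;

		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			perror("ETHTOOL_SRXFH");

		close(fd);
		return 0;
	}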
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 4bc794249801..77ea4b716535 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -357,22 +357,20 @@ int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
*/
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb,
- u32 staterr)
+ struct sk_buff *skb)
{
- u16 xid;
- u32 fctl;
- u32 fceofe, fcerr, fcstat;
int rc = -EINVAL;
struct ixgbe_fcoe *fcoe;
struct ixgbe_fcoe_ddp *ddp;
struct fc_frame_header *fh;
struct fcoe_crc_eof *crc;
+ __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
+ __le32 ddp_err;
+ u32 fctl;
+ u16 xid;
- fcerr = (staterr & IXGBE_RXDADV_ERR_FCERR);
- fceofe = (staterr & IXGBE_RXDADV_ERR_FCEOFE);
- if (fcerr == IXGBE_FCERR_BADCRC)
- skb_checksum_none_assert(skb);
+ if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC))
+ skb->ip_summed = CHECKSUM_NONE;
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -382,6 +380,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
else
fh = (struct fc_frame_header *)(skb->data +
sizeof(struct fcoe_hdr));
+
fctl = ntoh24(fh->fh_f_ctl);
if (fctl & FC_FC_EX_CTX)
xid = be16_to_cpu(fh->fh_ox_id);
@@ -396,27 +395,39 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
if (!ddp->udl)
goto ddp_out;
- if (fcerr | fceofe)
+ ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
+ IXGBE_RXDADV_ERR_FCERR);
+ if (ddp_err)
goto ddp_out;
- fcstat = (staterr & IXGBE_RXDADV_STAT_FCSTAT);
- if (fcstat) {
+ switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
+ /* return 0 to bypass going to ULD for DDPed data */
+ case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
/* update length of DDPed data */
ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
- /* unmap the sg list when FCP_RSP is received */
- if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
- pci_unmap_sg(adapter->pdev, ddp->sgl,
- ddp->sgc, DMA_FROM_DEVICE);
- ddp->err = (fcerr | fceofe);
- ddp->sgl = NULL;
- ddp->sgc = 0;
- }
- /* return 0 to bypass going to ULD for DDPed data */
- if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
- rc = 0;
- else if (ddp->len)
+ rc = 0;
+ break;
+ /* unmap the sg list when FCPRSP is received */
+ case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
+ pci_unmap_sg(adapter->pdev, ddp->sgl,
+ ddp->sgc, DMA_FROM_DEVICE);
+ ddp->err = ddp_err;
+ ddp->sgl = NULL;
+ ddp->sgc = 0;
+ /* fall through */
+ /* if DDP length is present pass it through to ULD */
+ case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
+ /* update length of DDPed data */
+ ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+ if (ddp->len)
rc = ddp->len;
+ break;
+ /* no match will return as an error */
+ case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
+ default:
+ break;
}
+
/* In target mode, check the last data frame of the sequence.
* For DDP in target mode, data is already DDPed but the header
+ * indication of the last data frame would allow us to tell if we
@@ -436,17 +447,18 @@ ddp_out:
/**
* ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
* @tx_ring: tx desc ring
- * @skb: associated skb
- * @tx_flags: tx flags
+ * @first: first tx_buffer structure containing skb, tx_flags, and protocol
* @hdr_len: hdr_len to be returned
*
* This sets up large send offload for FCoE
*
- * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
+ * Returns : 0 indicates success, < 0 for error
*/
-int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, u8 *hdr_len)
+int ixgbe_fso(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *first,
+ u8 *hdr_len)
{
+ struct sk_buff *skb = first->skb;
struct fc_frame_header *fh;
u32 vlan_macip_lens;
u32 fcoe_sof_eof = 0;
@@ -519,9 +531,18 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
*hdr_len = sizeof(struct fcoe_crc_eof);
/* hdr_len includes fc_hdr if FCoE LSO is enabled */
- if (skb_is_gso(skb))
- *hdr_len += (skb_transport_offset(skb) +
- sizeof(struct fc_frame_header));
+ if (skb_is_gso(skb)) {
+ *hdr_len += skb_transport_offset(skb) +
+ sizeof(struct fc_frame_header);
+ /* update gso_segs and bytecount */
+ first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
+ skb_shinfo(skb)->gso_size);
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+ first->tx_flags |= IXGBE_TX_FLAGS_FSO;
+ }
+
+ /* set flag indicating FCOE to ixgbe_tx_map call */
+ first->tx_flags |= IXGBE_TX_FLAGS_FCOE;
/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
@@ -532,13 +553,13 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
sizeof(struct fc_frame_header);
vlan_macip_lens |= (skb_transport_offset(skb) - 4)
<< IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
/* write context desc */
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);
- return skb_is_gso(skb);
+ return 0;
}
static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
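
ixgbe_fso() above now takes the first ixgbe_tx_buffer (carrying the skb, tx_flags and protocol) and returns 0 on success or a negative error, rather than the old "number of GSO segments" convention, since gso_segs and bytecount are now updated on the tx_buffer itself. A hedged sketch of the caller side in the transmit path (label names are assumptions, not from this patch):

	if (protocol == __constant_htons(ETH_P_FCOE) &&
	    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
		tso = ixgbe_fso(tx_ring, first, &hdr_len);
		if (tso < 0)
			goto out_drop;	/* free the skb and count the drop */

		goto xmit_fcoe;		/* FCoE frames skip the regular TSO/csum path */
	}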
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
new file mode 100644
index 000000000000..027d7a75be39
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -0,0 +1,929 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe.h"
+#include "ixgbe_sriov.h"
+
+/**
+ * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for RSS to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ return false;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->reg_idx = i;
+
+ return true;
+}
+#ifdef CONFIG_IXGBE_DCB
+
+/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
+static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
+ unsigned int *tx, unsigned int *rx)
+{
+ struct net_device *dev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 num_tcs = netdev_get_num_tc(dev);
+
+ *tx = 0;
+ *rx = 0;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ *tx = tc << 2;
+ *rx = tc << 3;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (num_tcs > 4) {
+ if (tc < 3) {
+ *tx = tc << 5;
+ *rx = tc << 4;
+ } else if (tc < 5) {
+ *tx = ((tc + 2) << 4);
+ *rx = tc << 4;
+ } else if (tc < num_tcs) {
+ *tx = ((tc + 8) << 3);
+ *rx = tc << 4;
+ }
+ } else {
+ *rx = tc << 5;
+ switch (tc) {
+ case 0:
+ *tx = 0;
+ break;
+ case 1:
+ *tx = 64;
+ break;
+ case 2:
+ *tx = 96;
+ break;
+ case 3:
+ *tx = 112;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for DCB to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
+{
+ struct net_device *dev = adapter->netdev;
+ int i, j, k;
+ u8 num_tcs = netdev_get_num_tc(dev);
+
+ if (!num_tcs)
+ return false;
+
+ for (i = 0, k = 0; i < num_tcs; i++) {
+ unsigned int tx_s, rx_s;
+ u16 count = dev->tc_to_txq[i].count;
+
+ ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
+ for (j = 0; j < count; j++, k++) {
+ adapter->tx_ring[k]->reg_idx = tx_s + j;
+ adapter->rx_ring[k]->reg_idx = rx_s + j;
+ adapter->tx_ring[k]->dcb_tc = i;
+ adapter->rx_ring[k]->dcb_tc = i;
+ }
+ }
+
+ return true;
+}
+#endif
+
+/**
+ * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for Flow Director to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
+{
+ int i;
+ bool ret = false;
+
+ if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+ (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->reg_idx = i;
+ ret = true;
+ }
+
+ return ret;
+}
+
+#ifdef IXGBE_FCOE
+/**
+ * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for FCoE
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
+ *
+ */
+static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+ int i;
+ u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
+
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return false;
+
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+ ixgbe_cache_ring_fdir(adapter);
+ else
+ ixgbe_cache_ring_rss(adapter);
+
+ fcoe_rx_i = f->mask;
+ fcoe_tx_i = f->mask;
+ }
+ for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
+ adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+ adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
+ }
+ return true;
+}
+
+#endif /* IXGBE_FCOE */
+/**
+ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
+ *
+ */
+static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
+{
+ adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
+ adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
+ if (adapter->num_vfs)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ixgbe_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ *
+ * Note, the order of the various feature calls is important. It must start with
+ * the "most" features enabled at the same time, then trickle down to the
+ * least amount of features turned on at once.
+ **/
+static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
+{
+ /* start with default case */
+ adapter->rx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[0]->reg_idx = 0;
+
+ if (ixgbe_cache_ring_sriov(adapter))
+ return;
+
+#ifdef CONFIG_IXGBE_DCB
+ if (ixgbe_cache_ring_dcb(adapter))
+ return;
+#endif
+
+#ifdef IXGBE_FCOE
+ if (ixgbe_cache_ring_fcoe(adapter))
+ return;
+#endif /* IXGBE_FCOE */
+
+ if (ixgbe_cache_ring_fdir(adapter))
+ return;
+
+ if (ixgbe_cache_ring_rss(adapter))
+ return;
+}
+
+/**
+ * ixgbe_set_sriov_queues: Allocate queues for IOV use
+ * @adapter: board private structure to initialize
+ *
+ * IOV doesn't actually use anything, so just NAK the
+ * request for now and let the other queue routines
+ * figure out what to do.
+ */
+static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
+{
+ return false;
+}
+
+/**
+ * ixgbe_set_rss_queues: Allocate queues for RSS
+ * @adapter: board private structure to initialize
+ *
+ * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
+ * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
+ *
+ **/
+static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
+{
+ bool ret = false;
+ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
+
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ f->mask = 0xF;
+ adapter->num_rx_queues = f->indices;
+ adapter->num_tx_queues = f->indices;
+ ret = true;
+ }
+
+ return ret;
+}
+
+/**
+ * ixgbe_set_fdir_queues: Allocate queues for Flow Director
+ * @adapter: board private structure to initialize
+ *
+ * Flow Director is an advanced Rx filter, attempting to get Rx flows back
+ * to the original CPU that initiated the Tx session. This runs in addition
+ * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
+ * Rx load across CPUs using RSS.
+ *
+ **/
+static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
+{
+ bool ret = false;
+ struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
+
+ f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices);
+ f_fdir->mask = 0;
+
+ /*
+ * Use RSS in addition to Flow Director to ensure the best
+ * distribution of flows across cores, even when an FDIR flow
+ * isn't matched.
+ */
+ if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+ (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
+ adapter->num_tx_queues = f_fdir->indices;
+ adapter->num_rx_queues = f_fdir->indices;
+ ret = true;
+ } else {
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ }
+ return ret;
+}
+
+#ifdef IXGBE_FCOE
+/**
+ * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
+ * @adapter: board private structure to initialize
+ *
+ * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
+ * The ring feature mask is not used as a mask for FCoE, as it can take any 8
+ * rx queues out of the max number of rx queues; instead, it is used as the
+ * index of the first rx queue used by FCoE.
+ *
+ **/
+static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return false;
+
+ f->indices = min_t(int, num_online_cpus(), f->indices);
+
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ e_info(probe, "FCoE enabled with RSS\n");
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+ ixgbe_set_fdir_queues(adapter);
+ else
+ ixgbe_set_rss_queues(adapter);
+ }
+
+ /* adding FCoE rx rings to the end */
+ f->mask = adapter->num_rx_queues;
+ adapter->num_rx_queues += f->indices;
+ adapter->num_tx_queues += f->indices;
+
+ return true;
+}
+#endif /* IXGBE_FCOE */
+
+/* Artificial max queue cap per traffic class in DCB mode */
+#define DCB_QUEUE_CAP 8
+
+#ifdef CONFIG_IXGBE_DCB
+static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+{
+ int per_tc_q, q, i, offset = 0;
+ struct net_device *dev = adapter->netdev;
+ int tcs = netdev_get_num_tc(dev);
+
+ if (!tcs)
+ return false;
+
+ /* Map queue offset and counts onto allocated tx queues */
+ per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP);
+ q = min_t(int, num_online_cpus(), per_tc_q);
+
+ for (i = 0; i < tcs; i++) {
+ netdev_set_tc_queue(dev, i, q, offset);
+ offset += q;
+ }
+
+ adapter->num_tx_queues = q * tcs;
+ adapter->num_rx_queues = q * tcs;
+
+#ifdef IXGBE_FCOE
+ /* FCoE enabled queues require special configuration indexed
+ * by feature specific indices and mask. Here we map FCoE
+ * indices onto the DCB queue pairs allowing FCoE to own
+ * configuration later.
+ */
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ u8 prio_tc[MAX_USER_PRIORITY] = {0};
+ int tc;
+ struct ixgbe_ring_feature *f =
+ &adapter->ring_feature[RING_F_FCOE];
+
+ ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
+ tc = prio_tc[adapter->fcoe.up];
+ f->indices = dev->tc_to_txq[tc].count;
+ f->mask = dev->tc_to_txq[tc].offset;
+ }
+#endif
+
+ return true;
+}
+#endif
+
+/**
+ * ixgbe_set_num_queues: Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine. The order here is very
+ * important, starting with the "most" number of features turned on at once,
+ * and ending with the smallest set of features. This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ *
+ **/
+static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+{
+ /* Start with base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+
+ if (ixgbe_set_sriov_queues(adapter))
+ goto done;
+
+#ifdef CONFIG_IXGBE_DCB
+ if (ixgbe_set_dcb_queues(adapter))
+ goto done;
+
+#endif
+#ifdef IXGBE_FCOE
+ if (ixgbe_set_fcoe_queues(adapter))
+ goto done;
+
+#endif /* IXGBE_FCOE */
+ if (ixgbe_set_fdir_queues(adapter))
+ goto done;
+
+ if (ixgbe_set_rss_queues(adapter))
+ goto done;
+
+ /* fallback to base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+
+done:
+ if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
+ (adapter->netdev->reg_state == NETREG_UNREGISTERING))
+ return 0;
+
+ /* Notify the stack of the (possibly) reduced queue counts. */
+ netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
+ return netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
+}
+
+static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
+ int vectors)
+{
+ int err, vector_threshold;
+
+ /* We'll want at least 2 (vector_threshold):
+ * 1) TxQ[0] + RxQ[0] handler
+ * 2) Other (Link Status Change, etc.)
+ */
+ vector_threshold = MIN_MSIX_COUNT;
+
+ /*
+ * The more we get, the more we will assign to Tx/Rx Cleanup
+ * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+ * Right now, we simply care about how many we'll get; we'll
+ * set them up later while requesting IRQs.
+ */
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ vectors);
+ if (!err) /* Success in acquiring all requested vectors. */
+ break;
+ else if (err < 0)
+ vectors = 0; /* Nasty failure, quit now */
+ else /* err == number of vectors we should try again with */
+ vectors = err;
+ }
+
+ if (vectors < vector_threshold) {
+ /* Can't allocate enough MSI-X interrupts? Oh well.
+ * This just means we'll go with either a single MSI
+ * vector or fall back to legacy interrupts.
+ */
+ netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
+ "Unable to allocate MSI-X interrupts\n");
+ adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else {
+ adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
+ /*
+ * Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = min(vectors,
+ adapter->max_msix_q_vectors + NON_Q_VECTORS);
+ }
+}
+
+static void ixgbe_add_ring(struct ixgbe_ring *ring,
+ struct ixgbe_ring_container *head)
+{
+ ring->next = head->ring;
+ head->ring = ring;
+ head->count++;
+}
+
+/**
+ * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: number of Tx rings to assign to this vector
+ * @txr_idx: index of the first Tx ring assigned
+ * @rxr_count: number of Rx rings to assign to this vector
+ * @rxr_idx: index of the first Rx ring assigned
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ **/
+static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
+ int txr_count, int txr_idx,
+ int rxr_count, int rxr_idx)
+{
+ struct ixgbe_q_vector *q_vector;
+ struct ixgbe_ring *ring;
+ int node = -1;
+ int cpu = -1;
+ int ring_count, size;
+
+ ring_count = txr_count + rxr_count;
+ size = sizeof(struct ixgbe_q_vector) +
+ (sizeof(struct ixgbe_ring) * ring_count);
+
+ /* customize cpu for Flow Director mapping */
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ if (cpu_online(v_idx)) {
+ cpu = v_idx;
+ node = cpu_to_node(cpu);
+ }
+ }
+
+ /* allocate q_vector and rings */
+ q_vector = kzalloc_node(size, GFP_KERNEL, node);
+ if (!q_vector)
+ q_vector = kzalloc(size, GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* setup affinity mask and node */
+ if (cpu != -1)
+ cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+ else
+ cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
+ q_vector->numa_node = node;
+
+ /* initialize NAPI */
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ ixgbe_poll, 64);
+
+ /* tie q_vector and adapter together */
+ adapter->q_vector[v_idx] = q_vector;
+ q_vector->adapter = adapter;
+ q_vector->v_idx = v_idx;
+
+ /* initialize work limits */
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ while (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ ixgbe_add_ring(ring, &q_vector->tx);
+
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = txr_idx;
+
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+ /* update count and index */
+ txr_count--;
+ txr_idx++;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ while (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Rx values */
+ ixgbe_add_ring(ring, &q_vector->rx);
+
+ /*
+ * 82599 errata, UDP frames with a 0 checksum
+ * can be marked as checksum errors.
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
+
+ /* apply Rx specific ring traits */
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
+
+ /* update count and index */
+ rxr_count--;
+ rxr_idx++;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
+{
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
+ struct ixgbe_ring *ring;
+
+ ixgbe_for_each_ring(ring, q_vector->tx)
+ adapter->tx_ring[ring->queue_index] = NULL;
+
+ ixgbe_for_each_ring(ring, q_vector->rx)
+ adapter->rx_ring[ring->queue_index] = NULL;
+
+ adapter->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
+
+ /*
+ * ixgbe_get_stats64() might access the rings on this vector,
+ * we must wait a grace period before freeing it.
+ */
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
+{
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ int err;
+
+ /* only one q_vector if MSI-X is disabled. */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ q_vectors = 1;
+
+ if (q_vectors >= (rxr_remaining + txr_remaining)) {
+ for (; rxr_remaining; v_idx++, q_vectors--) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
+ err = ixgbe_alloc_q_vector(adapter, v_idx,
+ 0, 0, rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ rxr_idx += rqpv;
+ }
+ }
+
+ for (; q_vectors; v_idx++, q_vectors--) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
+ err = ixgbe_alloc_q_vector(adapter, v_idx,
+ tqpv, txr_idx,
+ rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ rxr_idx += rqpv;
+ txr_remaining -= tqpv;
+ txr_idx += tqpv;
+ }
+
+ return 0;
+
+err_out:
+ while (v_idx) {
+ v_idx--;
+ ixgbe_free_q_vector(adapter, v_idx);
+ }
+
+ return -ENOMEM;
+}
+
+/**
+ * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
+{
+ int v_idx, q_vectors;
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ else
+ q_vectors = 1;
+
+ for (v_idx = 0; v_idx < q_vectors; v_idx++)
+ ixgbe_free_q_vector(adapter, v_idx);
+}
+
+static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
+{
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+ adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
+ pci_disable_msi(adapter->pdev);
+ }
+}
+
+/**
+ * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err = 0;
+ int vector, v_budget;
+
+ /*
+ * It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+ * than CPU's. So let's be conservative and only ask for
+ * (roughly) the same number of vectors as there are CPU's.
+ * The default is to use pairs of vectors.
+ */
+ v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
+ v_budget = min_t(int, v_budget, num_online_cpus());
+ v_budget += NON_Q_VECTORS;
+
+ /*
+ * At the same time, hardware can only support a maximum of
+ * hw.mac->max_msix_vectors vectors. With features
+ * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
+ * descriptor queues supported by our device. Thus, we cap it off in
+ * those rare cases where the cpu count also exceeds our vector limit.
+ */
+ v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
+
+ /* A failure in MSI-X entry allocation isn't fatal, but it does
+ * mean we disable MSI-X capabilities of the adapter. */
+ adapter->msix_entries = kcalloc(v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (adapter->msix_entries) {
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+ ixgbe_acquire_msix_vectors(adapter, v_budget);
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ goto out;
+ }
+
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ e_err(probe,
+ "ATR is not supported while multiple "
+ "queues are disabled. Disabling Flow Director\n");
+ }
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->atr_sample_rate = 0;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+
+ err = ixgbe_set_num_queues(adapter);
+ if (err)
+ return err;
+
+ err = pci_enable_msi(adapter->pdev);
+ if (!err) {
+ adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
+ } else {
+ netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
+ "Unable to allocate MSI interrupt, "
+ "falling back to legacy. Error: %d\n", err);
+ /* reset err */
+ err = 0;
+ }
+
+out:
+ return err;
+}
+
+/**
+ * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
+ * @adapter: board private structure to initialize
+ *
+ * We determine which interrupt scheme to use based on...
+ * - Kernel support (MSI, MSI-X)
+ * - which can be user-defined (via MODULE_PARAM)
+ * - Hardware queue count (num_*_queues)
+ * - defined by miscellaneous hardware support/features (RSS, etc.)
+ **/
+int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
+{
+ int err;
+
+ /* Number of supported queues */
+ err = ixgbe_set_num_queues(adapter);
+ if (err)
+ return err;
+
+ err = ixgbe_set_interrupt_capability(adapter);
+ if (err) {
+ e_dev_err("Unable to setup interrupt capabilities\n");
+ goto err_set_interrupt;
+ }
+
+ err = ixgbe_alloc_q_vectors(adapter);
+ if (err) {
+ e_dev_err("Unable to allocate memory for queue vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ ixgbe_cache_ring_register(adapter);
+
+ e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
+ (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
+ adapter->num_rx_queues, adapter->num_tx_queues);
+
+ set_bit(__IXGBE_DOWN, &adapter->state);
+
+ return 0;
+
+err_alloc_q_vectors:
+ ixgbe_reset_interrupt_capability(adapter);
+err_set_interrupt:
+ return err;
+}
+
+/**
+ * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
+{
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+
+ ixgbe_free_q_vectors(adapter);
+ ixgbe_reset_interrupt_capability(adapter);
+}
+
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
+ u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
+{
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ u16 i = tx_ring->next_to_use;
+
+ context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ /* set bits to identify this as an advanced context descriptor */
+ type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
+
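
A quick worked example of the ring-to-vector distribution in ixgbe_alloc_q_vectors() above (the counts are assumed, not from this patch): with 16 Rx rings, 16 Tx rings and 10 q_vectors, the Rx-only pre-pass is skipped because 10 < 16 + 16, and the combined loop hands each vector DIV_ROUND_UP(remaining, vectors_left) rings of each type. Vectors 0-5 therefore get 2 Rx + 2 Tx rings apiece and vectors 6-9 get 1 Rx + 1 Tx, accounting for all 16 + 16 rings with the larger shares assigned first.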
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3dc6cef58107..398fc223cab9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -55,8 +55,13 @@
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
+#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
+#else
+static char ixgbe_default_device_descr[] =
+ "Intel(R) 10 Gigabit Network Connection";
+#endif
#define MAJ 3
#define MIN 6
#define BUILD 7
@@ -131,6 +136,11 @@ MODULE_PARM_DESC(max_vfs,
"Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
+static unsigned int allow_unsupported_sfp;
+module_param(allow_unsupported_sfp, uint, 0);
+MODULE_PARM_DESC(allow_unsupported_sfp,
+ "Allow unsupported and untested SFP+ modules on 82599-based adapters");
+
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
@@ -284,7 +294,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
struct ixgbe_reg_info *reginfo;
int n = 0;
struct ixgbe_ring *tx_ring;
- struct ixgbe_tx_buffer *tx_buffer_info;
+ struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
struct my_u0 { u64 a; u64 b; } *u0;
struct ixgbe_ring *rx_ring;
@@ -324,14 +334,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
for (n = 0; n < adapter->num_tx_queues; n++) {
tx_ring = adapter->tx_ring[n];
- tx_buffer_info =
- &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+ tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
n, tx_ring->next_to_use, tx_ring->next_to_clean,
- (u64)tx_buffer_info->dma,
- tx_buffer_info->length,
- tx_buffer_info->next_to_watch,
- (u64)tx_buffer_info->time_stamp);
+ (u64)dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ tx_buffer->next_to_watch,
+ (u64)tx_buffer->time_stamp);
}
/* Print TX Rings */
@@ -361,18 +370,18 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
"leng ntw timestamp bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
- tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBE_TX_DESC(tx_ring, i);
+ tx_buffer = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
pr_info("T [0x%03X] %016llX %016llX %016llX"
" %04X %p %016llX %p", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- (u64)tx_buffer_info->dma,
- tx_buffer_info->length,
- tx_buffer_info->next_to_watch,
- (u64)tx_buffer_info->time_stamp,
- tx_buffer_info->skb);
+ (u64)dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ tx_buffer->next_to_watch,
+ (u64)tx_buffer->time_stamp,
+ tx_buffer->skb);
if (i == tx_ring->next_to_use &&
i == tx_ring->next_to_clean)
pr_cont(" NTC/U\n");
@@ -384,11 +393,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
pr_cont("\n");
if (netif_msg_pktdata(adapter) &&
- tx_buffer_info->dma != 0)
+ dma_unmap_len(tx_buffer, len) != 0)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS, 16, 1,
- phys_to_virt(tx_buffer_info->dma),
- tx_buffer_info->length, true);
+ phys_to_virt(dma_unmap_addr(tx_buffer,
+ dma)),
+ dma_unmap_len(tx_buffer, len),
+ true);
}
}
@@ -442,7 +453,7 @@ rx_ring_summary:
for (i = 0; i < rx_ring->count; i++) {
rx_buffer_info = &rx_ring->rx_buffer_info[i];
- rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+ rx_desc = IXGBE_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
if (staterr & IXGBE_RXD_STAT_DD) {
@@ -464,17 +475,7 @@ rx_ring_summary:
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS, 16, 1,
phys_to_virt(rx_buffer_info->dma),
- rx_ring->rx_buf_len, true);
-
- if (rx_ring->rx_buf_len
- < IXGBE_RXBUFFER_2K)
- print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS, 16, 1,
- phys_to_virt(
- rx_buffer_info->page_dma +
- rx_buffer_info->page_offset
- ),
- PAGE_SIZE/2, true);
+ ixgbe_rx_bufsz(rx_ring), true);
}
}
@@ -584,32 +585,26 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
}
}
-static inline void ixgbe_unmap_tx_resource(struct ixgbe_ring *ring,
- struct ixgbe_tx_buffer *tx_buffer)
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
+ struct ixgbe_tx_buffer *tx_buffer)
{
- if (tx_buffer->dma) {
- if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE)
- dma_unmap_page(ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
- DMA_TO_DEVICE);
- else
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
- DMA_TO_DEVICE);
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
}
- tx_buffer->dma = 0;
-}
-
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
- struct ixgbe_tx_buffer *tx_buffer_info)
-{
- ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
- if (tx_buffer_info->skb)
- dev_kfree_skb_any(tx_buffer_info->skb);
- tx_buffer_info->skb = NULL;
- /* tx_buffer_info must be completely set up in the transmit path */
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* tx_buffer must be completely set up in the transmit path */
}
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
@@ -666,7 +661,7 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
- return ring->tx_stats.completed;
+ return ring->stats.packets;
}
static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
@@ -746,56 +741,88 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
union ixgbe_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
unsigned int budget = q_vector->tx.work_limit;
- u16 i = tx_ring->next_to_clean;
+ unsigned int i = tx_ring->next_to_clean;
+
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return true;
tx_buffer = &tx_ring->tx_buffer_info[i];
- tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
+ tx_desc = IXGBE_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
- for (; budget; budget--) {
+ do {
union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
/* if next_to_watch is not set then there is no work pending */
if (!eop_desc)
break;
+ /* prevent any other reads prior to eop_desc */
+ rmb();
+
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
break;
- /* count the packet as being completed */
- tx_ring->tx_stats.completed++;
-
/* clear next_to_watch to prevent false hangs */
tx_buffer->next_to_watch = NULL;
- /* prevent any other reads prior to eop_desc being verified */
- rmb();
+ /* update the statistics for this packet */
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
- do {
- ixgbe_unmap_tx_resource(tx_ring, tx_buffer);
- tx_desc->wb.status = 0;
- if (likely(tx_desc == eop_desc)) {
- eop_desc = NULL;
- dev_kfree_skb_any(tx_buffer->skb);
- tx_buffer->skb = NULL;
+ /* free the skb */
+ dev_kfree_skb_any(tx_buffer->skb);
- total_bytes += tx_buffer->bytecount;
- total_packets += tx_buffer->gso_segs;
- }
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+
+ /* clear tx_buffer data */
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
tx_buffer++;
tx_desc++;
i++;
- if (unlikely(i == tx_ring->count)) {
- i = 0;
-
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
tx_buffer = tx_ring->tx_buffer_info;
- tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
+ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
}
- } while (eop_desc);
- }
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ }
+ }
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
+ }
+
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
tx_ring->next_to_clean = i;
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->stats.bytes += total_bytes;
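
The rewritten Tx clean loop above biases the ring index below zero (i -= tx_ring->count) so the wrap check collapses to "if (unlikely(!i))", and the true next_to_clean is recovered by adding the count back after the loop. A minimal standalone C sketch of that index trick, assuming a hypothetical 8-entry ring and a starting next_to_clean of 6:

#include <stdio.h>

/* Illustration only: walk 5 descriptors of an 8-entry ring using the
 * same "bias the index below zero" trick as the clean loop above.
 */
int main(void)
{
	int count = 8;           /* hypothetical ring size */
	int i = 6 - count;       /* next_to_clean = 6, biased below zero */
	int step;

	for (step = 0; step < 5; step++) {
		i++;
		if (!i)          /* wrapped past the last descriptor */
			i -= count;
	}

	printf("next_to_clean = %d\n", i + count);   /* (6 + 5) mod 8 = 3 */
	return 0;
}
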
@@ -807,7 +834,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
struct ixgbe_hw *hw = &adapter->hw;
- tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
e_err(drv, "Detected Tx Unit Hang\n"
" Tx Queue <%d>\n"
" TDH, TDT <%x>, <%x>\n"
@@ -835,6 +861,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
return true;
}
+ netdev_tx_completed_queue(txring_txq(tx_ring),
+ total_packets, total_bytes);
+
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
@@ -842,9 +871,11 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
* sees the new next_to_clean.
*/
smp_mb();
- if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
- !test_bit(__IXGBE_DOWN, &adapter->state)) {
- netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index)
+ && !test_bit(__IXGBE_DOWN, &adapter->state)) {
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
}
}
@@ -853,63 +884,68 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
}
#ifdef CONFIG_IXGBE_DCA
-static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
+static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring,
int cpu)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 rxctrl;
- u8 reg_idx = rx_ring->reg_idx;
+ u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
+ u16 reg_offset;
- rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
- rxctrl |= dca3_get_tag(rx_ring->dev, cpu);
+ reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
- rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
- rxctrl |= (dca3_get_tag(rx_ring->dev, cpu) <<
- IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
+ reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
+ txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
break;
default:
- break;
+ /* for unknown hardware do not write register */
+ return;
}
- rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
- rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
- rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
+
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_TXCTRL_DATA_RRO_EN |
+ IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+
+ IXGBE_WRITE_REG(hw, reg_offset, txctrl);
}
-static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
+static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
int cpu)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 txctrl;
- u8 reg_idx = tx_ring->reg_idx;
+ u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
+ u8 reg_idx = rx_ring->reg_idx;
+
switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
- txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
- txctrl |= dca3_get_tag(tx_ring->dev, cpu);
- txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
- break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
- txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
- txctrl |= (dca3_get_tag(tx_ring->dev, cpu) <<
- IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
- txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
+ rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
break;
default:
break;
}
+
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_RXCTRL_DATA_DCA_EN |
+ IXGBE_DCA_RXCTRL_DESC_DCA_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}
static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
@@ -921,10 +957,10 @@ static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
if (q_vector->cpu == cpu)
goto out_no_update;
- for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+ ixgbe_for_each_ring(ring, q_vector->tx)
ixgbe_update_tx_dca(adapter, ring, cpu);
- for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+ ixgbe_for_each_ring(ring, q_vector->rx)
ixgbe_update_rx_dca(adapter, ring, cpu);
q_vector->cpu = cpu;
@@ -984,14 +1020,17 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
return 0;
}
-#endif /* CONFIG_IXGBE_DCA */
-static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc,
+#endif /* CONFIG_IXGBE_DCA */
+static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
+ union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+ if (ring->netdev->features & NETIF_F_RXHASH)
+ skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}
+#ifdef IXGBE_FCOE
/**
* ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
* @adapter: address of board private structure
@@ -1010,73 +1049,45 @@ static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
}
-/**
- * ixgbe_receive_skb - Send a completed packet up the stack
- * @adapter: board private structure
- * @skb: packet to send up
- * @status: hardware indication of status of receive
- * @rx_ring: rx descriptor ring (for a specific queue) to setup
- * @rx_desc: rx descriptor
- **/
-static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb, u8 status,
- struct ixgbe_ring *ring,
- union ixgbe_adv_rx_desc *rx_desc)
-{
- struct ixgbe_adapter *adapter = q_vector->adapter;
- struct napi_struct *napi = &q_vector->napi;
- bool is_vlan = (status & IXGBE_RXD_STAT_VP);
- u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
-
- if (is_vlan && (tag & VLAN_VID_MASK))
- __vlan_hwaccel_put_tag(skb, tag);
-
- if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
- napi_gro_receive(napi, skb);
- else
- netif_rx(skb);
-}
-
+#endif /* IXGBE_FCOE */
/**
* ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
- * @adapter: address of board private structure
- * @status_err: hardware indication of status of receive
+ * @ring: structure containing ring specific data
+ * @rx_desc: current Rx descriptor being processed
* @skb: skb currently being received and modified
- * @status_err: status error value of last descriptor in packet
**/
-static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
+static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb,
- u32 status_err)
+ struct sk_buff *skb)
{
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* Rx csum disabled */
- if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ if (!(ring->netdev->features & NETIF_F_RXCSUM))
return;
/* if IP and error */
- if ((status_err & IXGBE_RXD_STAT_IPCS) &&
- (status_err & IXGBE_RXDADV_ERR_IPE)) {
- adapter->hw_csum_rx_error++;
+ if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
+ ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
+ ring->rx_stats.csum_err++;
return;
}
- if (!(status_err & IXGBE_RXD_STAT_L4CS))
+ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
return;
- if (status_err & IXGBE_RXDADV_ERR_TCPE) {
- u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+ if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
/*
* 82599 errata, UDP frames with a 0 checksum can be marked as
* checksum errors.
*/
- if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
- (adapter->hw.mac.type == ixgbe_mac_82599EB))
+ if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
+ test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
return;
- adapter->hw_csum_rx_error++;
+ ring->rx_stats.csum_err++;
return;
}
@@ -1086,6 +1097,10 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
+ rx_ring->next_to_use = val;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = val;
/*
* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -1096,8 +1111,51 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
writel(val, rx_ring->tail);
}
+static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t dma = bi->dma;
+
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(dma))
+ return true;
+
+ /* alloc new page for storage */
+ if (likely(!page)) {
+ page = alloc_pages(GFP_ATOMIC | __GFP_COLD,
+ ixgbe_rx_pg_order(rx_ring));
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ return false;
+ }
+ bi->page = page;
+ }
+
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0,
+ ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+
+ /*
+ * if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ put_page(page);
+ bi->page = NULL;
+
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ return false;
+ }
+
+ bi->dma = dma;
+ bi->page_offset ^= ixgbe_rx_bufsz(rx_ring);
+
+ return true;
+}
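
ixgbe_alloc_mapped_page() maps a whole page once and then alternates between its two halves by XOR-ing page_offset with the buffer size, so a recycled page rarely needs to be remapped. A minimal standalone sketch of that flip, assuming a 4 KiB page split into two 2 KiB buffers (the real sizes come from ixgbe_rx_pg_size() and ixgbe_rx_bufsz()):

#include <stdio.h>

#define PAGE_SZ  4096u
#define RX_BUFSZ (PAGE_SZ / 2)   /* stand-in for ixgbe_rx_bufsz() */

int main(void)
{
	unsigned int page_offset = 0;
	int refill;

	for (refill = 0; refill < 4; refill++) {
		/* same flip as bi->page_offset ^= ixgbe_rx_bufsz(rx_ring) */
		page_offset ^= RX_BUFSZ;
		printf("refill %d uses offset %u\n", refill, page_offset);
	}
	return 0;
}
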
+
/**
- * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
+ * ixgbe_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
**/
@@ -1105,344 +1163,599 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
- struct sk_buff *skb;
u16 i = rx_ring->next_to_use;
- /* do nothing if no valid netdev defined */
- if (!rx_ring->netdev)
+ /* nothing to do */
+ if (!cleaned_count)
return;
- while (cleaned_count--) {
- rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
- bi = &rx_ring->rx_buffer_info[i];
- skb = bi->skb;
+ rx_desc = IXGBE_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ i -= rx_ring->count;
- if (!skb) {
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_buf_len);
- if (!skb) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
- goto no_buffers;
- }
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- bi->skb = skb;
- }
+ do {
+ if (!ixgbe_alloc_mapped_page(rx_ring, bi))
+ break;
- if (!bi->dma) {
- bi->dma = dma_map_single(rx_ring->dev,
- skb->data,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
- bi->dma = 0;
- goto no_buffers;
- }
+ /*
+ * Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+
+ rx_desc++;
+ bi++;
+ i++;
+ if (unlikely(!i)) {
+ rx_desc = IXGBE_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_buffer_info;
+ i -= rx_ring->count;
}
- if (ring_is_ps_enabled(rx_ring)) {
- if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
- if (!bi->page) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
- goto no_buffers;
- }
- }
+ /* clear the hdr_addr for the next_to_use descriptor */
+ rx_desc->read.hdr_addr = 0;
- if (!bi->page_dma) {
- /* use a half page if we're re-using */
- bi->page_offset ^= PAGE_SIZE / 2;
- bi->page_dma = dma_map_page(rx_ring->dev,
- bi->page,
- bi->page_offset,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev,
- bi->page_dma)) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
- bi->page_dma = 0;
- goto no_buffers;
- }
- }
+ cleaned_count--;
+ } while (cleaned_count);
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info. */
- rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- } else {
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
- rx_desc->read.hdr_addr = 0;
- }
+ i += rx_ring->count;
- i++;
- if (i == rx_ring->count)
- i = 0;
+ if (rx_ring->next_to_use != i)
+ ixgbe_release_rx_desc(rx_ring, i);
+}
+
+/**
+ * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
+ * @data: pointer to the start of the headers
+ * @max_len: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, GRO, and RSC offloads. The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int ixgbe_get_headlen(unsigned char *data,
+ unsigned int max_len)
+{
+ union {
+ unsigned char *network;
+ /* l2 headers */
+ struct ethhdr *eth;
+ struct vlan_hdr *vlan;
+ /* l3 headers */
+ struct iphdr *ipv4;
+ } hdr;
+ __be16 protocol;
+ u8 nexthdr = 0; /* default to not TCP */
+ u8 hlen;
+
+ /* this should never happen, but better safe than sorry */
+ if (max_len < ETH_HLEN)
+ return max_len;
+
+ /* initialize network frame pointer */
+ hdr.network = data;
+
+ /* set first protocol and move network header forward */
+ protocol = hdr.eth->h_proto;
+ hdr.network += ETH_HLEN;
+
+ /* handle any vlan tag if present */
+ if (protocol == __constant_htons(ETH_P_8021Q)) {
+ if ((hdr.network - data) > (max_len - VLAN_HLEN))
+ return max_len;
+
+ protocol = hdr.vlan->h_vlan_encapsulated_proto;
+ hdr.network += VLAN_HLEN;
}
-no_buffers:
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
- ixgbe_release_rx_desc(rx_ring, i);
+ /* handle L3 protocols */
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+ return max_len;
+
+ /* access ihl as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[0] & 0x0F) << 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct iphdr))
+ return hdr.network - data;
+
+ /* record next protocol */
+ nexthdr = hdr.ipv4->protocol;
+ hdr.network += hlen;
+#ifdef IXGBE_FCOE
+ } else if (protocol == __constant_htons(ETH_P_FCOE)) {
+ if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
+ return max_len;
+ hdr.network += FCOE_HEADER_LEN;
+#endif
+ } else {
+ return hdr.network - data;
}
+
+ /* finally sort out TCP */
+ if (nexthdr == IPPROTO_TCP) {
+ if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+ return max_len;
+
+ /* access doff as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[12] & 0xF0) >> 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct tcphdr))
+ return hdr.network - data;
+
+ hdr.network += hlen;
+ }
+
+ /*
+ * If everything has gone correctly hdr.network should be the
+ * data section of the packet and will be the end of the header.
+ * If not then it probably represents the end of the last recognized
+ * header.
+ */
+ if ((hdr.network - data) < max_len)
+ return hdr.network - data;
+ else
+ return max_len;
}
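
For an untagged IPv4/TCP frame the header length found by ixgbe_get_headlen() works out to 14 + 20 + 20 = 54 bytes. A standalone sketch of that arithmetic, reusing the parser's nibble shifts on hand-picked example header bytes (the constants below are illustrative, not the kernel's struct definitions):

#include <stdio.h>

int main(void)
{
	unsigned int  eth_hlen      = 14;     /* ETH_HLEN */
	unsigned char ip_first_byte = 0x45;   /* version 4, IHL 5 */
	unsigned char tcp_doff_byte = 0x50;   /* data offset 5 */

	/* same shifts as the IHL and TCP data-offset reads above */
	unsigned int ip_hlen  = (ip_first_byte & 0x0F) << 2;   /* 20 bytes */
	unsigned int tcp_hlen = (tcp_doff_byte & 0xF0) >> 2;   /* 20 bytes */

	printf("headlen = %u bytes\n", eth_hlen + ip_hlen + tcp_hlen);  /* 54 */
	return 0;
}
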
-static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
+static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
- /* HW will not DMA in data larger than the given buffer, even if it
- * parses the (NFS, of course) header to be larger. In that case, it
- * fills the header buffer and spills the rest into the page.
- */
- u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
- u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
- IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- if (hlen > IXGBE_RX_HDR_SIZE)
- hlen = IXGBE_RX_HDR_SIZE;
- return hlen;
+ __le32 rsc_enabled;
+ u32 rsc_cnt;
+
+ if (!ring_is_rsc_enabled(rx_ring))
+ return;
+
+ rsc_enabled = rx_desc->wb.lower.lo_dword.data &
+ cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
+
+ /* If this is an RSC frame rsc_cnt should be non-zero */
+ if (!rsc_enabled)
+ return;
+
+ rsc_cnt = le32_to_cpu(rsc_enabled);
+ rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
+
+ IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
+}
+
+static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
+ struct sk_buff *skb)
+{
+ u16 hdr_len = skb_headlen(skb);
+
+ /* set gso_size to avoid messing up TCP MSS */
+ skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
+ IXGBE_CB(skb)->append_cnt);
+}
+
+static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ /* if append_cnt is 0 then frame is not RSC */
+ if (!IXGBE_CB(skb)->append_cnt)
+ return;
+
+ rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
+ rx_ring->rx_stats.rsc_flush++;
+
+ ixgbe_set_rsc_gso_size(rx_ring, skb);
+
+ /* gso_size is computed using append_cnt so always clear it last */
+ IXGBE_CB(skb)->append_cnt = 0;
+}
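
ixgbe_set_rsc_gso_size() estimates the MSS of a coalesced frame as DIV_ROUND_UP(skb->len - hdr_len, append_cnt). A standalone sketch of that computation; the lengths and append_cnt below are made-up values chosen so the result lands on the common 1460-byte MSS:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int skb_len    = 5894;   /* total aggregated frame length */
	unsigned int hdr_len    = 54;     /* skb_headlen() of the pulled header */
	unsigned int append_cnt = 4;      /* descriptors coalesced by RSC */

	printf("gso_size = %u\n",
	       DIV_ROUND_UP(skb_len - hdr_len, append_cnt));   /* 1460 */
	return 0;
}
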
+
+/**
+ * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ * other fields within the skb.
+ **/
+static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ ixgbe_update_rsc_stats(rx_ring, skb);
+
+ ixgbe_rx_hash(rx_ring, rx_desc, skb);
+
+ ixgbe_rx_checksum(rx_ring, rx_desc, skb);
+
+ if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
+ u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+ __vlan_hwaccel_put_tag(skb, vid);
+ }
+
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+}
+
+static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb)
+{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+ napi_gro_receive(&q_vector->napi, skb);
+ else
+ netif_rx(skb);
}
/**
- * ixgbe_transform_rsc_queue - change rsc queue into a full packet
- * @skb: pointer to the last skb in the rsc queue
+ * ixgbe_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
*
- * This function changes a queue full of hw rsc buffers into a completed
- * packet. It uses the ->prev pointers to find the first packet and then
- * turns it into the frag list owner.
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
**/
-static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
+static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
- unsigned int frag_list_size = 0;
- unsigned int skb_cnt = 1;
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(IXGBE_RX_DESC(rx_ring, ntc));
+
+ if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+ return false;
+
+ /* append_cnt indicates packet is RSC, if so fetch nextp */
+ if (IXGBE_CB(skb)->append_cnt) {
+ ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
+ ntc &= IXGBE_RXDADV_NEXTP_MASK;
+ ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
+ }
+
+ /* place skb in next buffer to be received */
+ rx_ring->rx_buffer_info[ntc].skb = skb;
+ rx_ring->rx_stats.non_eop_descs++;
+
+ return true;
+}
+
+/**
+ * ixgbe_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Check for corrupted packet headers caused by senders on the local L2
+ * embedded NIC switch not setting up their Tx Descriptors right. These
+ * should be very rare.
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ struct net_device *netdev = rx_ring->netdev;
+ unsigned char *va;
+ unsigned int pull_len;
+
+ /* if the page was released unmap it, else just sync our portion */
+ if (unlikely(IXGBE_CB(skb)->page_released)) {
+ dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
+ ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+ IXGBE_CB(skb)->page_released = false;
+ } else {
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ IXGBE_CB(skb)->dma,
+ frag->page_offset,
+ ixgbe_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
+ }
+ IXGBE_CB(skb)->dma = 0;
+
+ /* verify that the packet does not have any known errors */
+ if (unlikely(ixgbe_test_staterr(rx_desc,
+ IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
+ !(netdev->features & NETIF_F_RXALL))) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+
+ /*
+ * it is valid to use page_address instead of kmap since we are
+	 * working with pages allocated out of the lowmem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
+
+ /*
+ * we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = skb_frag_size(frag);
+ if (pull_len > 256)
+ pull_len = ixgbe_get_headlen(va, pull_len);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+
+ /*
+ * if we sucked the frag empty then we should free it,
+ * if there are other frags here something is screwed up in hardware
+ */
+ if (skb_frag_size(frag) == 0) {
+ BUG_ON(skb_shinfo(skb)->nr_frags != 1);
+ skb_shinfo(skb)->nr_frags = 0;
+ __skb_frag_unref(frag);
+ skb->truesize -= ixgbe_rx_bufsz(rx_ring);
+ }
+
+ /* if skb_pad returns an error the skb was freed */
+ if (unlikely(skb->len < 60)) {
+ int pad_len = 60 - skb->len;
- while (skb->prev) {
- struct sk_buff *prev = skb->prev;
- frag_list_size += skb->len;
- skb->prev = NULL;
- skb = prev;
- skb_cnt++;
+ if (skb_pad(skb, pad_len))
+ return true;
+ __skb_put(skb, pad_len);
}
- skb_shinfo(skb)->frag_list = skb->next;
- skb->next = NULL;
- skb->len += frag_list_size;
- skb->data_len += frag_list_size;
- skb->truesize += frag_list_size;
- IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
+ return false;
+}
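
The header pull in ixgbe_cleanup_headers() rounds the pull length up to sizeof(long) before copying into the linear area, trading a few extra bytes for an aligned memcpy. A standalone sketch of that rounding, assuming the 54-byte IPv4/TCP header length worked out earlier:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int pull_len = 54;                       /* Ethernet + IPv4 + TCP */
	unsigned int copied   = ALIGN(pull_len, sizeof(long));

	printf("pull %u bytes, copy %u bytes (%zu-byte aligned)\n",
	       pull_len, copied, sizeof(long));           /* 54 -> 56 on LP64 */
	return 0;
}
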
+
+/**
+ * ixgbe_can_reuse_page - determine if we can reuse a page
+ * @rx_buffer: pointer to rx_buffer containing the page we want to reuse
+ *
+ * Returns true if page can be reused in another Rx buffer
+ **/
+static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer)
+{
+ struct page *page = rx_buffer->page;
+
+ /* if we are only owner of page and it is local we can reuse it */
+ return likely(page_count(page) == 1) &&
+ likely(page_to_nid(page) == numa_node_id());
+}
+
+/**
+ * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *old_buff)
+{
+ struct ixgbe_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
+ u16 bufsz = ixgbe_rx_bufsz(rx_ring);
+
+ new_buff = &rx_ring->rx_buffer_info[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ new_buff->page = old_buff->page;
+ new_buff->dma = old_buff->dma;
- return skb;
+ /* flip page offset to other buffer and store to new_buff */
+ new_buff->page_offset = old_buff->page_offset ^ bufsz;
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
+ new_buff->page_offset, bufsz,
+ DMA_FROM_DEVICE);
+
+ /* bump ref count on page before it is given to the stack */
+ get_page(new_buff->page);
}
-static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
+/**
+ * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function is based on skb_add_rx_frag. That function is not used
+ * directly because it doesn't handle the truesize case correctly: we are
+ * allocating more memory than might be used for a single receive.
+ **/
+static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *rx_buffer,
+ struct sk_buff *skb, int size)
{
- return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
- IXGBE_RXDADV_RSCCNT_MASK);
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buffer->page, rx_buffer->page_offset,
+ size);
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += ixgbe_rx_bufsz(rx_ring);
}
+/**
+ * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage of this is that on systems that have
+ * expensive overhead for IOMMU access, this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns true if all work is completed without reaching budget
+ **/
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
int budget)
{
- struct ixgbe_adapter *adapter = q_vector->adapter;
- union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
- struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
- struct sk_buff *skb;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- const int current_node = numa_node_id();
#ifdef IXGBE_FCOE
+ struct ixgbe_adapter *adapter = q_vector->adapter;
int ddp_bytes = 0;
#endif /* IXGBE_FCOE */
- u32 staterr;
- u16 i;
- u16 cleaned_count = 0;
- bool pkt_is_rsc = false;
-
- i = rx_ring->next_to_clean;
- rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
-
- while (staterr & IXGBE_RXD_STAT_DD) {
- u32 upper_len = 0;
-
- rmb(); /* read descriptor and rx_buffer_info after status DD */
-
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
-
- skb = rx_buffer_info->skb;
- rx_buffer_info->skb = NULL;
- prefetch(skb->data);
-
- if (ring_is_rsc_enabled(rx_ring))
- pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
-
- /* linear means we are building an skb from multiple pages */
- if (!skb_is_nonlinear(skb)) {
- u16 hlen;
- if (pkt_is_rsc &&
- !(staterr & IXGBE_RXD_STAT_EOP) &&
- !skb->prev) {
- /*
- * When HWRSC is enabled, delay unmapping
- * of the first packet. It carries the
- * header information, HW may still
- * access the header after the writeback.
- * Only unmap it when EOP is reached
- */
- IXGBE_RSC_CB(skb)->delay_unmap = true;
- IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
- } else {
- dma_unmap_single(rx_ring->dev,
- rx_buffer_info->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- }
- rx_buffer_info->dma = 0;
+ u16 cleaned_count = ixgbe_desc_unused(rx_ring);
- if (ring_is_ps_enabled(rx_ring)) {
- hlen = ixgbe_get_hlen(rx_desc);
- upper_len = le16_to_cpu(rx_desc->wb.upper.length);
- } else {
- hlen = le16_to_cpu(rx_desc->wb.upper.length);
- }
+ do {
+ struct ixgbe_rx_buffer *rx_buffer;
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct sk_buff *skb;
+ struct page *page;
+ u16 ntc;
- skb_put(skb, hlen);
- } else {
- /* assume packet split since header is unmapped */
- upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
+ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
}
- if (upper_len) {
- dma_unmap_page(rx_ring->dev,
- rx_buffer_info->page_dma,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- rx_buffer_info->page_dma = 0;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_buffer_info->page,
- rx_buffer_info->page_offset,
- upper_len);
-
- if ((page_count(rx_buffer_info->page) == 1) &&
- (page_to_nid(rx_buffer_info->page) == current_node))
- get_page(rx_buffer_info->page);
- else
- rx_buffer_info->page = NULL;
+ ntc = rx_ring->next_to_clean;
+ rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
+ rx_buffer = &rx_ring->rx_buffer_info[ntc];
- skb->len += upper_len;
- skb->data_len += upper_len;
- skb->truesize += PAGE_SIZE / 2;
- }
+ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+ break;
- i++;
- if (i == rx_ring->count)
- i = 0;
+ /*
+ * This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
- next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
- prefetch(next_rxd);
- cleaned_count++;
+ page = rx_buffer->page;
+ prefetchw(page);
- if (pkt_is_rsc) {
- u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
- IXGBE_RXDADV_NEXTP_SHIFT;
- next_buffer = &rx_ring->rx_buffer_info[nextp];
- } else {
- next_buffer = &rx_ring->rx_buffer_info[i];
- }
+ skb = rx_buffer->skb;
- if (!(staterr & IXGBE_RXD_STAT_EOP)) {
- if (ring_is_ps_enabled(rx_ring)) {
- rx_buffer_info->skb = next_buffer->skb;
- rx_buffer_info->dma = next_buffer->dma;
- next_buffer->skb = skb;
- next_buffer->dma = 0;
- } else {
- skb->next = next_buffer->skb;
- skb->next->prev = skb;
- }
- rx_ring->rx_stats.non_eop_descs++;
- goto next_desc;
- }
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) +
+ rx_buffer->page_offset;
- if (skb->prev) {
- skb = ixgbe_transform_rsc_queue(skb);
- /* if we got here without RSC the packet is invalid */
- if (!pkt_is_rsc) {
- __pskb_trim(skb, 0);
- rx_buffer_info->skb = skb;
- goto next_desc;
- }
- }
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
- if (ring_is_rsc_enabled(rx_ring)) {
- if (IXGBE_RSC_CB(skb)->delay_unmap) {
- dma_unmap_single(rx_ring->dev,
- IXGBE_RSC_CB(skb)->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- IXGBE_RSC_CB(skb)->dma = 0;
- IXGBE_RSC_CB(skb)->delay_unmap = false;
+ /* allocate a skb to store the frags */
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ IXGBE_RX_HDR_SIZE);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ break;
}
+
+ /*
+ * we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+
+ /*
+ * Delay unmapping of the first packet. It carries the
+ * header information, HW may still access the header
+ * after the writeback. Only unmap it when EOP is
+ * reached
+ */
+ IXGBE_CB(skb)->dma = rx_buffer->dma;
+ } else {
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ ixgbe_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
}
- if (pkt_is_rsc) {
- if (ring_is_ps_enabled(rx_ring))
- rx_ring->rx_stats.rsc_count +=
- skb_shinfo(skb)->nr_frags;
- else
- rx_ring->rx_stats.rsc_count +=
- IXGBE_RSC_CB(skb)->skb_cnt;
- rx_ring->rx_stats.rsc_flush++;
- }
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
- dev_kfree_skb_any(skb);
- goto next_desc;
+ /* pull page into skb */
+ ixgbe_add_rx_frag(rx_ring, rx_buffer, skb,
+ le16_to_cpu(rx_desc->wb.upper.length));
+
+ if (ixgbe_can_reuse_page(rx_buffer)) {
+ /* hand second half of page back to the ring */
+ ixgbe_reuse_rx_page(rx_ring, rx_buffer);
+ } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+ /* the page has been released from the ring */
+ IXGBE_CB(skb)->page_released = true;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE);
}
- ixgbe_rx_checksum(adapter, rx_desc, skb, staterr);
- if (adapter->netdev->features & NETIF_F_RXHASH)
- ixgbe_rx_hash(rx_desc, skb);
+ /* clear contents of buffer_info */
+ rx_buffer->skb = NULL;
+ rx_buffer->dma = 0;
+ rx_buffer->page = NULL;
+
+ ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
+
+ cleaned_count++;
+
+ /* place incomplete frames back on ring for completion */
+ if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
+ continue;
+
+ /* verify the packet layout is correct */
+ if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
+ continue;
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
total_rx_packets++;
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ /* populate checksum, timestamp, VLAN, and protocol */
+ ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
+
#ifdef IXGBE_FCOE
/* if ddp, not passing to ULD unless for FCP_RSP or error */
if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
- ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb,
- staterr);
+ ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
if (!ddp_bytes) {
dev_kfree_skb_any(skb);
- goto next_desc;
+ continue;
}
}
+
#endif /* IXGBE_FCOE */
- ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
+ ixgbe_rx_skb(q_vector, skb);
+ /* update budget accounting */
budget--;
-next_desc:
- rx_desc->wb.upper.status_error = 0;
-
- if (!budget)
- break;
-
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
- ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
- cleaned_count = 0;
- }
-
- /* use prefetched values */
- rx_desc = next_rxd;
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
- }
-
- rx_ring->next_to_clean = i;
- cleaned_count = ixgbe_desc_unused(rx_ring);
-
- if (cleaned_count)
- ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+ } while (likely(budget));
#ifdef IXGBE_FCOE
/* include DDPed FCoE data */
@@ -1457,8 +1770,8 @@ next_desc:
total_rx_bytes += ddp_bytes;
total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
}
-#endif /* IXGBE_FCOE */
+#endif /* IXGBE_FCOE */
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
@@ -1466,6 +1779,9 @@ next_desc:
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
+ if (cleaned_count)
+ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+
return !!budget;
}
@@ -1498,10 +1814,10 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
struct ixgbe_ring *ring;
q_vector = adapter->q_vector[v_idx];
- for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+ ixgbe_for_each_ring(ring, q_vector->rx)
ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
- for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+ ixgbe_for_each_ring(ring, q_vector->tx)
ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
if (q_vector->tx.ring && !q_vector->rx.ring) {
@@ -1569,20 +1885,19 @@ enum latency_range {
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring_container *ring_container)
{
- u64 bytes_perint;
- struct ixgbe_adapter *adapter = q_vector->adapter;
int bytes = ring_container->total_bytes;
int packets = ring_container->total_packets;
u32 timepassed_us;
+ u64 bytes_perint;
u8 itr_setting = ring_container->itr;
if (packets == 0)
return;
/* simple throttlerate management
- * 0-20MB/s lowest (100000 ints/s)
- * 20-100MB/s low (20000 ints/s)
- * 100-1249MB/s bulk (8000 ints/s)
+ * 0-10MB/s lowest (100000 ints/s)
+ * 10-20MB/s low (20000 ints/s)
+ * 20-1249MB/s bulk (8000 ints/s)
*/
/* what was last interrupt timeslice? */
timepassed_us = q_vector->itr >> 2;
@@ -1590,17 +1905,17 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
switch (itr_setting) {
case lowest_latency:
- if (bytes_perint > adapter->eitr_low)
+ if (bytes_perint > 10)
itr_setting = low_latency;
break;
case low_latency:
- if (bytes_perint > adapter->eitr_high)
+ if (bytes_perint > 20)
itr_setting = bulk_latency;
- else if (bytes_perint <= adapter->eitr_low)
+ else if (bytes_perint <= 10)
itr_setting = lowest_latency;
break;
case bulk_latency:
- if (bytes_perint <= adapter->eitr_high)
+ if (bytes_perint <= 20)
itr_setting = low_latency;
break;
}
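
The buckets above compare bytes per microsecond of the last EITR interval (timepassed_us = q_vector->itr >> 2) against fixed thresholds of 10 and 20, i.e. roughly 10 MB/s and 20 MB/s. A standalone sketch of the classification, collapsed into a stateless helper for illustration; the driver itself only moves one bucket at a time from the current setting:

#include <stdio.h>

enum latency_range { lowest_latency, low_latency, bulk_latency };

static enum latency_range classify(unsigned long bytes, unsigned int itr_reg)
{
	unsigned int timepassed_us = itr_reg >> 2;   /* same scaling as q_vector->itr >> 2 */
	unsigned long bytes_perint = bytes / timepassed_us;

	if (bytes_perint > 20)
		return bulk_latency;
	if (bytes_perint > 10)
		return low_latency;
	return lowest_latency;
}

int main(void)
{
	/* 3000 bytes over a 200 us interval -> 15 B/us -> low_latency (1) */
	printf("range = %d\n", classify(3000, 200 << 2));
	return 0;
}
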
@@ -1626,7 +1941,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_hw *hw = &adapter->hw;
int v_idx = q_vector->v_idx;
- u32 itr_reg = q_vector->itr;
+ u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
@@ -1678,14 +1993,14 @@ static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
((9 * new_itr) + q_vector->itr);
/* save the algorithm value here */
- q_vector->itr = new_itr & IXGBE_MAX_EITR;
+ q_vector->itr = new_itr;
ixgbe_write_eitr(q_vector);
}
}
/**
- * ixgbe_check_overtemp_subtask - check for over tempurature
+ * ixgbe_check_overtemp_subtask - check for over temperature
* @adapter: pointer to adapter
**/
static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
@@ -1997,76 +2312,53 @@ static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
return IRQ_HANDLED;
}
-static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
- int r_idx)
-{
- struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
- struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
-
- rx_ring->q_vector = q_vector;
- rx_ring->next = q_vector->rx.ring;
- q_vector->rx.ring = rx_ring;
- q_vector->rx.count++;
-}
-
-static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
- int t_idx)
-{
- struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
- struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
-
- tx_ring->q_vector = q_vector;
- tx_ring->next = q_vector->tx.ring;
- q_vector->tx.ring = tx_ring;
- q_vector->tx.count++;
- q_vector->tx.work_limit = a->tx_work_limit;
-}
-
/**
- * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
- * @adapter: board private structure to initialize
+ * ixgbe_poll - NAPI Rx polling callback
+ * @napi: structure for representing this polling device
+ * @budget: how many packets driver is allowed to clean
*
- * This function maps descriptor rings to the queue-specific vectors
- * we were allotted through the MSI-X enabling code. Ideally, we'd have
- * one vector per ring/queue, but on a constrained vector budget, we
- * group the rings as "efficiently" as possible. You would add new
- * mapping configurations in here.
+ * This function is used for legacy and MSI, NAPI mode
**/
-static void ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
+int ixgbe_poll(struct napi_struct *napi, int budget)
{
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- int rxr_remaining = adapter->num_rx_queues, rxr_idx = 0;
- int txr_remaining = adapter->num_tx_queues, txr_idx = 0;
- int v_start = 0;
+ struct ixgbe_q_vector *q_vector =
+ container_of(napi, struct ixgbe_q_vector, napi);
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct ixgbe_ring *ring;
+ int per_ring_budget;
+ bool clean_complete = true;
- /* only one q_vector if MSI-X is disabled. */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
- q_vectors = 1;
+#ifdef CONFIG_IXGBE_DCA
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_dca(q_vector);
+#endif
- /*
- * If we don't have enough vectors for a 1-to-1 mapping, we'll have to
- * group them so there are multiple queues per vector.
- *
- * Re-adjusting *qpv takes care of the remainder.
- */
- for (; v_start < q_vectors && rxr_remaining; v_start++) {
- int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_start);
- for (; rqpv; rqpv--, rxr_idx++, rxr_remaining--)
- map_vector_to_rxq(adapter, v_start, rxr_idx);
- }
+ ixgbe_for_each_ring(ring, q_vector->tx)
+ clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
- /*
- * If there are not enough q_vectors for each ring to have it's own
- * vector then we must pair up Rx/Tx on a each vector
- */
- if ((v_start + txr_remaining) > q_vectors)
- v_start = 0;
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling */
+ if (q_vector->rx.count > 1)
+ per_ring_budget = max(budget/q_vector->rx.count, 1);
+ else
+ per_ring_budget = budget;
- for (; v_start < q_vectors && txr_remaining; v_start++) {
- int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_start);
- for (; tqpv; tqpv--, txr_idx++, txr_remaining--)
- map_vector_to_txq(adapter, v_start, txr_idx);
- }
+ ixgbe_for_each_ring(ring, q_vector->rx)
+ clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
+ per_ring_budget);
+
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+
+ /* all work done, exit the polling mode */
+ napi_complete(napi);
+ if (adapter->rx_itr_setting & 1)
+ ixgbe_set_itr(q_vector);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+
+ return 0;
}
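
ixgbe_poll() above splits the NAPI budget evenly across the vector's Rx rings but never lets the per-ring share drop below 1, so every ring makes some forward progress. A standalone sketch of that split with a few example budgets:

#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

static int per_ring_budget(int budget, int rx_ring_count)
{
	if (rx_ring_count > 1)
		return max(budget / rx_ring_count, 1);
	return budget;
}

int main(void)
{
	printf("%d\n", per_ring_budget(64, 3));   /* 21 */
	printf("%d\n", per_ring_budget(64, 1));   /* 64 */
	printf("%d\n", per_ring_budget(2, 4));    /* floor of 1 */
	return 0;
}
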
/**
@@ -2112,14 +2404,14 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
/* assign the mask for this irq */
irq_set_affinity_hint(entry->vector,
- q_vector->affinity_mask);
+ &q_vector->affinity_mask);
}
}
err = request_irq(adapter->msix_entries[vector].vector,
ixgbe_msix_other, 0, netdev->name, adapter);
if (err) {
- e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
+ e_err(probe, "request_irq for msix_other failed: %d\n", err);
goto free_queue_irqs;
}
@@ -2153,7 +2445,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
u32 eicr;
/*
- * Workaround for silicon errata on 82598. Mask the interrupts
+ * Workaround for silicon errata #26 on 82598. Mask the interrupt
* before the read of EICR.
*/
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
@@ -2193,47 +2485,19 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
- if (napi_schedule_prep(&(q_vector->napi))) {
- /* would disable interrupts here but EIAM disabled it */
- __napi_schedule(&(q_vector->napi));
- }
+ /* would disable interrupts here but EIAM disabled it */
+ napi_schedule(&q_vector->napi);
/*
* re-enable link(maybe) and non-queue interrupts, no flush.
* ixgbe_poll will re-enable the queue interrupts
*/
-
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable(adapter, false, false);
return IRQ_HANDLED;
}
-static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
-{
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- int i;
-
- /* legacy and MSI only use one vector */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
- q_vectors = 1;
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i]->q_vector = NULL;
- adapter->rx_ring[i]->next = NULL;
- }
- for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i]->q_vector = NULL;
- adapter->tx_ring[i]->next = NULL;
- }
-
- for (i = 0; i < q_vectors; i++) {
- struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
- memset(&q_vector->rx, 0, sizeof(struct ixgbe_ring_container));
- memset(&q_vector->tx, 0, sizeof(struct ixgbe_ring_container));
- }
-}
-
/**
* ixgbe_request_irq - initialize interrupts
* @adapter: board private structure
@@ -2246,9 +2510,6 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
struct net_device *netdev = adapter->netdev;
int err;
- /* map all of the rings to the q_vectors */
- ixgbe_map_rings_to_vectors(adapter);
-
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
err = ixgbe_request_msix_irqs(adapter);
else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
@@ -2258,13 +2519,9 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
netdev->name, adapter);
- if (err) {
+ if (err)
e_err(probe, "request_irq failed, Error %d\n", err);
- /* place q_vectors and rings back into a known good state */
- ixgbe_reset_q_vectors(adapter);
- }
-
return err;
}
@@ -2294,9 +2551,6 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
} else {
free_irq(adapter->pdev->irq, adapter);
}
-
- /* clear q_vector state information */
- ixgbe_reset_q_vectors(adapter);
}
/**
@@ -2387,12 +2641,15 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
* to or less than the number of on chip descriptors, which is
* currently 40.
*/
- if (!adapter->tx_itr_setting || !adapter->rx_itr_setting)
+ if (!ring->q_vector || (ring->q_vector->itr < 8))
txdctl |= (1 << 16); /* WTHRESH = 1 */
else
txdctl |= (8 << 16); /* WTHRESH = 8 */
- /* PTHRESH=32 is needed to avoid a Tx hang with DFP enabled. */
+ /*
+ * Setting PTHRESH to 32 both improves performance
+ * and avoids a TX hang with DFP enabled
+ */
txdctl |= (1 << 8) | /* HTHRESH = 1 */
32; /* PTHRESH = 32 */
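
The TXDCTL value built above packs PTHRESH at bit 0, HTHRESH at bit 8 and WTHRESH at bit 16, as the shifts in the code show. A standalone sketch of the resulting threshold bits for the WTHRESH = 8 case (register layout inferred from the shifts, not quoted from the datasheet):

#include <stdio.h>

int main(void)
{
	unsigned int txdctl = 0;

	txdctl |= 8u << 16;   /* WTHRESH = 8: batch descriptor write-backs */
	txdctl |= 1u << 8;    /* HTHRESH = 1 */
	txdctl |= 32u;        /* PTHRESH = 32 */

	printf("TXDCTL threshold bits = 0x%08x\n", txdctl);   /* 0x00080120 */
	return 0;
}
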
@@ -2411,6 +2668,8 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
/* enable queue */
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
+ netdev_tx_reset_queue(txring_txq(ring));
+
/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
if (hw->mac.type == ixgbe_mac_82598EB &&
!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
@@ -2527,18 +2786,12 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
IXGBE_SRRCTL_BSIZEHDR_MASK;
- if (ring_is_ps_enabled(rx_ring)) {
-#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
- srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
+ srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
- srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#endif
- srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- } else {
- srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- }
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
@@ -2608,6 +2861,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
| IXGBE_MRQC_RSS_FIELD_IPV6
| IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+ if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
+ if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
+
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
@@ -2621,13 +2879,11 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
{
struct ixgbe_hw *hw = &adapter->hw;
u32 rscctrl;
- int rx_buf_len;
u8 reg_idx = ring->reg_idx;
if (!ring_is_rsc_enabled(ring))
return;
- rx_buf_len = ring->rx_buf_len;
rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
rscctrl |= IXGBE_RSCCTL_RSCEN;
/*
@@ -2635,24 +2891,13 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
* total size of max desc * buf_len is not greater
* than 65536
*/
- if (ring_is_ps_enabled(ring)) {
-#if (PAGE_SIZE < 8192)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (PAGE_SIZE < 16384)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#elif (PAGE_SIZE < 32768)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+#if (PAGE_SIZE <= 8192)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+#elif (PAGE_SIZE <= 16384)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
#else
- rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
#endif
- } else {
- if (rx_buf_len <= IXGBE_RXBUFFER_4K)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
- else if (rx_buf_len <= IXGBE_RXBUFFER_8K)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
- else
- rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
- }
IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
}
@@ -2869,23 +3114,10 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
- int rx_buf_len;
struct ixgbe_ring *rx_ring;
int i;
u32 mhadd, hlreg0;
- /* Decide whether to use packet split mode or not */
- /* On by default */
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
-
- /* Do not use packet split if we're in SR-IOV Mode */
- if (adapter->num_vfs)
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
-
- /* Disable packet split due to 82599 erratum #45 */
- if (hw->mac.type == ixgbe_mac_82599EB)
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
-
#ifdef IXGBE_FCOE
/* adjust max frame to be able to do baby jumbo for FCoE */
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
@@ -2904,27 +3136,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
/* MHADD will allow an extra 4 bytes past for vlan tagged frames */
max_frame += VLAN_HLEN;
- /* Set the RX buffer length according to the mode */
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- rx_buf_len = IXGBE_RX_HDR_SIZE;
- } else {
- if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
- (netdev->mtu <= ETH_DATA_LEN))
- rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- /*
- * Make best use of allocation by using all but 1K of a
- * power of 2 allocation that will be used for skb->head.
- */
- else if (max_frame <= IXGBE_RXBUFFER_3K)
- rx_buf_len = IXGBE_RXBUFFER_3K;
- else if (max_frame <= IXGBE_RXBUFFER_7K)
- rx_buf_len = IXGBE_RXBUFFER_7K;
- else if (max_frame <= IXGBE_RXBUFFER_15K)
- rx_buf_len = IXGBE_RXBUFFER_15K;
- else
- rx_buf_len = IXGBE_MAX_RXBUFFER;
- }
-
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
@@ -2936,32 +3147,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
rx_ring = adapter->rx_ring[i];
- rx_ring->rx_buf_len = rx_buf_len;
-
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
- set_ring_ps_enabled(rx_ring);
- else
- clear_ring_ps_enabled(rx_ring);
-
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
set_ring_rsc_enabled(rx_ring);
else
clear_ring_rsc_enabled(rx_ring);
-
#ifdef IXGBE_FCOE
if (netdev->features & NETIF_F_FCOE_MTU) {
struct ixgbe_ring_feature *f;
f = &adapter->ring_feature[RING_F_FCOE];
- if ((i >= f->mask) && (i < f->mask + f->indices)) {
- clear_ring_ps_enabled(rx_ring);
- if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
- rx_ring->rx_buf_len =
- IXGBE_FCOE_JUMBO_FRAME_SIZE;
- } else if (!ring_is_rsc_enabled(rx_ring) &&
- !ring_is_ps_enabled(rx_ring)) {
- rx_ring->rx_buf_len =
- IXGBE_FCOE_JUMBO_FRAME_SIZE;
- }
+ if ((i >= f->mask) && (i < f->mask + f->indices))
+ set_bit(__IXGBE_RX_FCOE_BUFSZ, &rx_ring->state);
}
#endif /* IXGBE_FCOE */
}
@@ -3235,6 +3430,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
/* set all bits that we expect to always be set */
+ fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
fctrl |= IXGBE_FCTRL_BAM;
fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
fctrl |= IXGBE_FCTRL_PMCF;
@@ -3283,6 +3479,18 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
}
+ /* This is useful for sniffing bad packets. */
+ if (adapter->netdev->features & NETIF_F_RXALL) {
+ /* UPE and MPE will be handled by normal PROMISC logic
+	 * in ixgbe_set_rx_mode */
+ fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
+ IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
+ IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */
+
+ fctrl &= ~(IXGBE_FCTRL_DPF);
+ /* NOTE: VLAN filtering is disabled by setting PROMISC */
+ }
+
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
if (netdev->features & NETIF_F_HW_VLAN_RX)
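
The NETIF_F_RXALL handling above is a plain read-modify-write of the FCTRL
word: the store-bad-packets, broadcast-accept and pass-MAC-control bits are
ORed in and the discard-pause-frames bit is cleared so nothing gets filtered.
A compilable sketch of that bit juggling, with made-up bit positions standing
in for the real IXGBE_FCTRL_* definitions from ixgbe_type.h:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit positions, for illustration only. */
#define FCTRL_SBP	(1u << 1)	/* store bad packets */
#define FCTRL_BAM	(1u << 10)	/* accept broadcast */
#define FCTRL_PMCF	(1u << 12)	/* pass MAC control frames */
#define FCTRL_DPF	(1u << 13)	/* discard pause frames */

int main(void)
{
	uint32_t fctrl = FCTRL_BAM | FCTRL_DPF;	/* pretend register readback */
	int rx_all = 1;				/* NETIF_F_RXALL requested */

	fctrl &= ~FCTRL_SBP;			/* default: drop bad packets */
	if (rx_all) {
		fctrl |= FCTRL_SBP | FCTRL_BAM | FCTRL_PMCF;
		fctrl &= ~FCTRL_DPF;		/* keep pause frames as well */
	}
	printf("FCTRL = 0x%08x\n", (unsigned int)fctrl);
	return 0;
}
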
@@ -3554,6 +3762,8 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
+ struct ixgbe_hw *hw = &adapter->hw;
+
ixgbe_configure_pb(adapter);
#ifdef CONFIG_IXGBE_DCB
ixgbe_configure_dcb(adapter);
@@ -3567,6 +3777,16 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_configure_fcoe(adapter);
#endif /* IXGBE_FCOE */
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ hw->mac.ops.disable_rx_buff(hw);
+ break;
+ default:
+ break;
+ }
+
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
ixgbe_init_fdir_signature_82599(&adapter->hw,
adapter->fdir_pballoc);
@@ -3576,6 +3796,15 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_fdir_filter_restore(adapter);
}
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ hw->mac.ops.enable_rx_buff(hw);
+ break;
+ default:
+ break;
+ }
+
ixgbe_configure_virtualization(adapter);
ixgbe_configure_tx(adapter);
@@ -3849,6 +4078,27 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
+ * @rx_ring: ring to setup
+ *
+ * On many IA platforms the L1 cache has a critical stride of 4K, this
+ * results in each receive buffer starting in the same cache set. To help
+ * reduce the pressure on this cache set we can interleave the offsets so
+ * that only every other buffer will be in the same cache set.
+ **/
+static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring)
+{
+ struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info;
+ u16 i;
+
+ for (i = 0; i < rx_ring->count; i += 2) {
+ rx_buffer[0].page_offset = 0;
+ rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring);
+ rx_buffer = &rx_buffer[2];
+ }
+}
+
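
As the ixgbe_init_rx_page_offset comment says, with a 4K critical stride every
buffer that starts at offset 0 of its page lands in the same L1 cache set;
alternating the starting offset spreads consecutive buffers over two sets. A
standalone sketch of the resulting offset pattern, assuming half-page receive
buffers (the buffer size is an assumption, not taken from the driver):

#include <stdio.h>

#define PAGE_SZ		4096u
#define RX_BUFSZ	(PAGE_SZ / 2)	/* assumed half-page Rx buffers */

int main(void)
{
	unsigned int offset[8];
	unsigned int i;

	/* Interleave 0 and RX_BUFSZ, as the helper above does in pairs. */
	for (i = 0; i < 8; i += 2) {
		offset[i] = 0;
		offset[i + 1] = RX_BUFSZ;
	}
	for (i = 0; i < 8; i++)
		printf("buffer %u starts at page offset %u\n", i, offset[i]);
	return 0;
}
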
+/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
**/
@@ -3864,50 +4114,40 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- struct ixgbe_rx_buffer *rx_buffer_info;
-
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
- if (rx_buffer_info->dma) {
- dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_buffer_info->dma = 0;
- }
- if (rx_buffer_info->skb) {
- struct sk_buff *skb = rx_buffer_info->skb;
- rx_buffer_info->skb = NULL;
- do {
- struct sk_buff *this = skb;
- if (IXGBE_RSC_CB(this)->delay_unmap) {
- dma_unmap_single(dev,
- IXGBE_RSC_CB(this)->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- IXGBE_RSC_CB(this)->dma = 0;
- IXGBE_RSC_CB(skb)->delay_unmap = false;
- }
- skb = skb->prev;
- dev_kfree_skb(this);
- } while (skb);
- }
- if (!rx_buffer_info->page)
- continue;
- if (rx_buffer_info->page_dma) {
- dma_unmap_page(dev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
- rx_buffer_info->page_dma = 0;
+ struct ixgbe_rx_buffer *rx_buffer;
+
+ rx_buffer = &rx_ring->rx_buffer_info[i];
+ if (rx_buffer->skb) {
+ struct sk_buff *skb = rx_buffer->skb;
+ if (IXGBE_CB(skb)->page_released) {
+ dma_unmap_page(dev,
+ IXGBE_CB(skb)->dma,
+ ixgbe_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
+ IXGBE_CB(skb)->page_released = false;
+ }
+ dev_kfree_skb(skb);
}
- put_page(rx_buffer_info->page);
- rx_buffer_info->page = NULL;
- rx_buffer_info->page_offset = 0;
+ rx_buffer->skb = NULL;
+ if (rx_buffer->dma)
+ dma_unmap_page(dev, rx_buffer->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE);
+ rx_buffer->dma = 0;
+ if (rx_buffer->page)
+ put_page(rx_buffer->page);
+ rx_buffer->page = NULL;
}
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
memset(rx_ring->rx_buffer_info, 0, size);
+ ixgbe_init_rx_page_offset(rx_ring);
+
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -4073,55 +4313,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_poll - NAPI Rx polling callback
- * @napi: structure for representing this polling device
- * @budget: how many packets driver is allowed to clean
- *
- * This function is used for legacy and MSI, NAPI mode
- **/
-static int ixgbe_poll(struct napi_struct *napi, int budget)
-{
- struct ixgbe_q_vector *q_vector =
- container_of(napi, struct ixgbe_q_vector, napi);
- struct ixgbe_adapter *adapter = q_vector->adapter;
- struct ixgbe_ring *ring;
- int per_ring_budget;
- bool clean_complete = true;
-
-#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_dca(q_vector);
-#endif
-
- for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
- clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
-
- /* attempt to distribute budget to each queue fairly, but don't allow
- * the budget to go below 1 because we'll exit polling */
- if (q_vector->rx.count > 1)
- per_ring_budget = max(budget/q_vector->rx.count, 1);
- else
- per_ring_budget = budget;
-
- for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
- clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
- per_ring_budget);
-
- /* If all work not completed, return budget and keep polling */
- if (!clean_complete)
- return budget;
-
- /* all work done, exit the polling mode */
- napi_complete(napi);
- if (adapter->rx_itr_setting & 1)
- ixgbe_set_itr(q_vector);
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
-
- return 0;
-}
-
-/**
* ixgbe_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
@@ -4134,802 +4325,6 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
}
/**
- * ixgbe_set_rss_queues: Allocate queues for RSS
- * @adapter: board private structure to initialize
- *
- * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
- * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
- *
- **/
-static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
-{
- bool ret = false;
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
-
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- f->mask = 0xF;
- adapter->num_rx_queues = f->indices;
- adapter->num_tx_queues = f->indices;
- ret = true;
- } else {
- ret = false;
- }
-
- return ret;
-}
-
-/**
- * ixgbe_set_fdir_queues: Allocate queues for Flow Director
- * @adapter: board private structure to initialize
- *
- * Flow Director is an advanced Rx filter, attempting to get Rx flows back
- * to the original CPU that initiated the Tx session. This runs in addition
- * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
- * Rx load across CPUs using RSS.
- *
- **/
-static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
-{
- bool ret = false;
- struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
-
- f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
- f_fdir->mask = 0;
-
- /* Flow Director must have RSS enabled */
- if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
- (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
- adapter->num_tx_queues = f_fdir->indices;
- adapter->num_rx_queues = f_fdir->indices;
- ret = true;
- } else {
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- }
- return ret;
-}
-
-#ifdef IXGBE_FCOE
-/**
- * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
- * @adapter: board private structure to initialize
- *
- * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
- * The ring feature mask is not used as a mask for FCoE, as it can take any 8
- * rx queues out of the max number of rx queues, instead, it is used as the
- * index of the first rx queue used by FCoE.
- *
- **/
-static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
-
- if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
- return false;
-
- f->indices = min((int)num_online_cpus(), f->indices);
-
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
-
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- e_info(probe, "FCoE enabled with RSS\n");
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
- ixgbe_set_fdir_queues(adapter);
- else
- ixgbe_set_rss_queues(adapter);
- }
-
- /* adding FCoE rx rings to the end */
- f->mask = adapter->num_rx_queues;
- adapter->num_rx_queues += f->indices;
- adapter->num_tx_queues += f->indices;
-
- return true;
-}
-#endif /* IXGBE_FCOE */
-
-/* Artificial max queue cap per traffic class in DCB mode */
-#define DCB_QUEUE_CAP 8
-
-#ifdef CONFIG_IXGBE_DCB
-static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
-{
- int per_tc_q, q, i, offset = 0;
- struct net_device *dev = adapter->netdev;
- int tcs = netdev_get_num_tc(dev);
-
- if (!tcs)
- return false;
-
- /* Map queue offset and counts onto allocated tx queues */
- per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP);
- q = min((int)num_online_cpus(), per_tc_q);
-
- for (i = 0; i < tcs; i++) {
- netdev_set_tc_queue(dev, i, q, offset);
- offset += q;
- }
-
- adapter->num_tx_queues = q * tcs;
- adapter->num_rx_queues = q * tcs;
-
-#ifdef IXGBE_FCOE
- /* FCoE enabled queues require special configuration indexed
- * by feature specific indices and mask. Here we map FCoE
- * indices onto the DCB queue pairs allowing FCoE to own
- * configuration later.
- */
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
- int tc;
- struct ixgbe_ring_feature *f =
- &adapter->ring_feature[RING_F_FCOE];
-
- tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
- f->indices = dev->tc_to_txq[tc].count;
- f->mask = dev->tc_to_txq[tc].offset;
- }
-#endif
-
- return true;
-}
-#endif
-
-/**
- * ixgbe_set_sriov_queues: Allocate queues for IOV use
- * @adapter: board private structure to initialize
- *
- * IOV doesn't actually use anything, so just NAK the
- * request for now and let the other queue routines
- * figure out what to do.
- */
-static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
-{
- return false;
-}
-
-/*
- * ixgbe_set_num_queues: Allocate queues for device, feature dependent
- * @adapter: board private structure to initialize
- *
- * This is the top level queue allocation routine. The order here is very
- * important, starting with the "most" number of features turned on at once,
- * and ending with the smallest set of features. This way large combinations
- * can be allocated if they're turned on, and smaller combinations are the
- * fallthrough conditions.
- *
- **/
-static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
-{
- /* Start with base case */
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
- adapter->num_rx_pools = adapter->num_rx_queues;
- adapter->num_rx_queues_per_pool = 1;
-
- if (ixgbe_set_sriov_queues(adapter))
- goto done;
-
-#ifdef CONFIG_IXGBE_DCB
- if (ixgbe_set_dcb_queues(adapter))
- goto done;
-
-#endif
-#ifdef IXGBE_FCOE
- if (ixgbe_set_fcoe_queues(adapter))
- goto done;
-
-#endif /* IXGBE_FCOE */
- if (ixgbe_set_fdir_queues(adapter))
- goto done;
-
- if (ixgbe_set_rss_queues(adapter))
- goto done;
-
- /* fallback to base case */
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
-
-done:
- if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
- (adapter->netdev->reg_state == NETREG_UNREGISTERING))
- return 0;
-
- /* Notify the stack of the (possibly) reduced queue counts. */
- netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
- return netif_set_real_num_rx_queues(adapter->netdev,
- adapter->num_rx_queues);
-}
-
-static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
- int vectors)
-{
- int err, vector_threshold;
-
- /* We'll want at least 3 (vector_threshold):
- * 1) TxQ[0] Cleanup
- * 2) RxQ[0] Cleanup
- * 3) Other (Link Status Change, etc.)
- * 4) TCP Timer (optional)
- */
- vector_threshold = MIN_MSIX_COUNT;
-
- /* The more we get, the more we will assign to Tx/Rx Cleanup
- * for the separate queues...where Rx Cleanup >= Tx Cleanup.
- * Right now, we simply care about how many we'll get; we'll
- * set them up later while requesting irq's.
- */
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- vectors);
- if (!err) /* Success in acquiring all requested vectors. */
- break;
- else if (err < 0)
- vectors = 0; /* Nasty failure, quit now */
- else /* err == number of vectors we should try again with */
- vectors = err;
- }
-
- if (vectors < vector_threshold) {
- /* Can't allocate enough MSI-X interrupts? Oh well.
- * This just means we'll go with either a single MSI
- * vector or fall back to legacy interrupts.
- */
- netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
- "Unable to allocate MSI-X interrupts\n");
- adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
- } else {
- adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
- /*
- * Adjust for only the vectors we'll use, which is minimum
- * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
- * vectors we were allocated.
- */
- adapter->num_msix_vectors = min(vectors,
- adapter->max_msix_q_vectors + NON_Q_VECTORS);
- }
-}
-
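
For reference, the removed ixgbe_acquire_msix_vectors depended on the historic
pci_enable_msix() contract: zero means all requested vectors were granted, a
negative value is a hard failure, and a positive value is the number of
vectors actually available, which the caller retries with until it succeeds or
drops below its minimum. A toy model of that downward-retry loop; try_enable
and all numbers here are invented for illustration:

#include <stdio.h>

/* Toy stand-in for pci_enable_msix(): 0 on success, otherwise how many
 * vectors are actually available. */
static int try_enable(int requested, int avail)
{
	return requested <= avail ? 0 : avail;
}

int main(void)
{
	int vectors = 16, threshold = 3, avail = 5;

	while (vectors >= threshold) {
		int err = try_enable(vectors, avail);

		if (!err)
			break;		/* got everything we asked for */
		else if (err < 0)
			vectors = 0;	/* hard failure, give up */
		else
			vectors = err;	/* retry with what is on offer */
	}
	if (vectors >= threshold)
		printf("MSI-X enabled with %d vectors\n", vectors);
	else
		printf("falling back to MSI/legacy interrupts\n");
	return 0;
}
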
-/**
- * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for RSS to the assigned rings.
- *
- **/
-static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
-{
- int i;
-
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
- return false;
-
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = i;
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->reg_idx = i;
-
- return true;
-}
-
-#ifdef CONFIG_IXGBE_DCB
-
-/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
-static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
- unsigned int *tx, unsigned int *rx)
-{
- struct net_device *dev = adapter->netdev;
- struct ixgbe_hw *hw = &adapter->hw;
- u8 num_tcs = netdev_get_num_tc(dev);
-
- *tx = 0;
- *rx = 0;
-
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- *tx = tc << 2;
- *rx = tc << 3;
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- if (num_tcs > 4) {
- if (tc < 3) {
- *tx = tc << 5;
- *rx = tc << 4;
- } else if (tc < 5) {
- *tx = ((tc + 2) << 4);
- *rx = tc << 4;
- } else if (tc < num_tcs) {
- *tx = ((tc + 8) << 3);
- *rx = tc << 4;
- }
- } else {
- *rx = tc << 5;
- switch (tc) {
- case 0:
- *tx = 0;
- break;
- case 1:
- *tx = 64;
- break;
- case 2:
- *tx = 96;
- break;
- case 3:
- *tx = 112;
- break;
- default:
- break;
- }
- }
- break;
- default:
- break;
- }
-}
-
-/**
- * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for DCB to the assigned rings.
- *
- **/
-static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
-{
- struct net_device *dev = adapter->netdev;
- int i, j, k;
- u8 num_tcs = netdev_get_num_tc(dev);
-
- if (!num_tcs)
- return false;
-
- for (i = 0, k = 0; i < num_tcs; i++) {
- unsigned int tx_s, rx_s;
- u16 count = dev->tc_to_txq[i].count;
-
- ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
- for (j = 0; j < count; j++, k++) {
- adapter->tx_ring[k]->reg_idx = tx_s + j;
- adapter->rx_ring[k]->reg_idx = rx_s + j;
- adapter->tx_ring[k]->dcb_tc = i;
- adapter->rx_ring[k]->dcb_tc = i;
- }
- }
-
- return true;
-}
-#endif
-
-/**
- * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for Flow Director to the assigned rings.
- *
- **/
-static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
-{
- int i;
- bool ret = false;
-
- if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
- (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = i;
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->reg_idx = i;
- ret = true;
- }
-
- return ret;
-}
-
-#ifdef IXGBE_FCOE
-/**
- * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
- *
- */
-static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
- int i;
- u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
-
- if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
- return false;
-
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
- ixgbe_cache_ring_fdir(adapter);
- else
- ixgbe_cache_ring_rss(adapter);
-
- fcoe_rx_i = f->mask;
- fcoe_tx_i = f->mask;
- }
- for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
- adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
- adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
- }
- return true;
-}
-
-#endif /* IXGBE_FCOE */
-/**
- * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
- * @adapter: board private structure to initialize
- *
- * SR-IOV doesn't use any descriptor rings but changes the default if
- * no other mapping is used.
- *
- */
-static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
-{
- adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
- adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
- if (adapter->num_vfs)
- return true;
- else
- return false;
-}
-
-/**
- * ixgbe_cache_ring_register - Descriptor ring to register mapping
- * @adapter: board private structure to initialize
- *
- * Once we know the feature-set enabled for the device, we'll cache
- * the register offset the descriptor ring is assigned to.
- *
- * Note, the order the various feature calls is important. It must start with
- * the "most" features enabled at the same time, then trickle down to the
- * least amount of features turned on at once.
- **/
-static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
-{
- /* start with default case */
- adapter->rx_ring[0]->reg_idx = 0;
- adapter->tx_ring[0]->reg_idx = 0;
-
- if (ixgbe_cache_ring_sriov(adapter))
- return;
-
-#ifdef CONFIG_IXGBE_DCB
- if (ixgbe_cache_ring_dcb(adapter))
- return;
-#endif
-
-#ifdef IXGBE_FCOE
- if (ixgbe_cache_ring_fcoe(adapter))
- return;
-#endif /* IXGBE_FCOE */
-
- if (ixgbe_cache_ring_fdir(adapter))
- return;
-
- if (ixgbe_cache_ring_rss(adapter))
- return;
-}
-
-/**
- * ixgbe_alloc_queues - Allocate memory for all rings
- * @adapter: board private structure to initialize
- *
- * We allocate one ring per queue at run-time since we don't know the
- * number of queues at compile-time. The polling_netdev array is
- * intended for Multiqueue, but should work fine with a single queue.
- **/
-static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
-{
- int rx = 0, tx = 0, nid = adapter->node;
-
- if (nid < 0 || !node_online(nid))
- nid = first_online_node;
-
- for (; tx < adapter->num_tx_queues; tx++) {
- struct ixgbe_ring *ring;
-
- ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
- if (!ring)
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- goto err_allocation;
- ring->count = adapter->tx_ring_count;
- ring->queue_index = tx;
- ring->numa_node = nid;
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
-
- adapter->tx_ring[tx] = ring;
- }
-
- for (; rx < adapter->num_rx_queues; rx++) {
- struct ixgbe_ring *ring;
-
- ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
- if (!ring)
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- goto err_allocation;
- ring->count = adapter->rx_ring_count;
- ring->queue_index = rx;
- ring->numa_node = nid;
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
-
- adapter->rx_ring[rx] = ring;
- }
-
- ixgbe_cache_ring_register(adapter);
-
- return 0;
-
-err_allocation:
- while (tx)
- kfree(adapter->tx_ring[--tx]);
-
- while (rx)
- kfree(adapter->rx_ring[--rx]);
- return -ENOMEM;
-}
-
-/**
- * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
- * @adapter: board private structure to initialize
- *
- * Attempt to configure the interrupts using the best available
- * capabilities of the hardware and the kernel.
- **/
-static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int err = 0;
- int vector, v_budget;
-
- /*
- * It's easy to be greedy for MSI-X vectors, but it really
- * doesn't do us much good if we have a lot more vectors
- * than CPU's. So let's be conservative and only ask for
- * (roughly) the same number of vectors as there are CPU's.
- */
- v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
- (int)num_online_cpus()) + NON_Q_VECTORS;
-
- /*
- * At the same time, hardware can only support a maximum of
- * hw.mac->max_msix_vectors vectors. With features
- * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
- * descriptor queues supported by our device. Thus, we cap it off in
- * those rare cases where the cpu count also exceeds our vector limit.
- */
- v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
-
- /* A failure in MSI-X entry allocation isn't fatal, but it does
- * mean we disable MSI-X capabilities of the adapter. */
- adapter->msix_entries = kcalloc(v_budget,
- sizeof(struct msix_entry), GFP_KERNEL);
- if (adapter->msix_entries) {
- for (vector = 0; vector < v_budget; vector++)
- adapter->msix_entries[vector].entry = vector;
-
- ixgbe_acquire_msix_vectors(adapter, v_budget);
-
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- goto out;
- }
-
- adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- e_err(probe,
- "ATR is not supported while multiple "
- "queues are disabled. Disabling Flow Director\n");
- }
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->atr_sample_rate = 0;
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- ixgbe_disable_sriov(adapter);
-
- err = ixgbe_set_num_queues(adapter);
- if (err)
- return err;
-
- err = pci_enable_msi(adapter->pdev);
- if (!err) {
- adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
- } else {
- netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
- "Unable to allocate MSI interrupt, "
- "falling back to legacy. Error: %d\n", err);
- /* reset err */
- err = 0;
- }
-
-out:
- return err;
-}
-
-/**
- * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
- * @adapter: board private structure to initialize
- *
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
- **/
-static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
-{
- int v_idx, num_q_vectors;
- struct ixgbe_q_vector *q_vector;
-
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- else
- num_q_vectors = 1;
-
- for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
- GFP_KERNEL, adapter->node);
- if (!q_vector)
- q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
- GFP_KERNEL);
- if (!q_vector)
- goto err_out;
-
- q_vector->adapter = adapter;
- q_vector->v_idx = v_idx;
-
- /* Allocate the affinity_hint cpumask, configure the mask */
- if (!alloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
- goto err_out;
- cpumask_set_cpu(v_idx, q_vector->affinity_mask);
- netif_napi_add(adapter->netdev, &q_vector->napi,
- ixgbe_poll, 64);
- adapter->q_vector[v_idx] = q_vector;
- }
-
- return 0;
-
-err_out:
- while (v_idx) {
- v_idx--;
- q_vector = adapter->q_vector[v_idx];
- netif_napi_del(&q_vector->napi);
- free_cpumask_var(q_vector->affinity_mask);
- kfree(q_vector);
- adapter->q_vector[v_idx] = NULL;
- }
- return -ENOMEM;
-}
-
-/**
- * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
- * @adapter: board private structure to initialize
- *
- * This function frees the memory allocated to the q_vectors. In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
- **/
-static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
-{
- int v_idx, num_q_vectors;
-
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- else
- num_q_vectors = 1;
-
- for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
- adapter->q_vector[v_idx] = NULL;
- netif_napi_del(&q_vector->napi);
- free_cpumask_var(q_vector->affinity_mask);
- kfree(q_vector);
- }
-}
-
-static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
-{
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
- pci_disable_msix(adapter->pdev);
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
- } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
- adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
- pci_disable_msi(adapter->pdev);
- }
-}
-
-/**
- * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
- * @adapter: board private structure to initialize
- *
- * We determine which interrupt scheme to use based on...
- * - Kernel support (MSI, MSI-X)
- * - which can be user-defined (via MODULE_PARAM)
- * - Hardware queue count (num_*_queues)
- * - defined by miscellaneous hardware support/features (RSS, etc.)
- **/
-int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
-{
- int err;
-
- /* Number of supported queues */
- err = ixgbe_set_num_queues(adapter);
- if (err)
- return err;
-
- err = ixgbe_set_interrupt_capability(adapter);
- if (err) {
- e_dev_err("Unable to setup interrupt capabilities\n");
- goto err_set_interrupt;
- }
-
- err = ixgbe_alloc_q_vectors(adapter);
- if (err) {
- e_dev_err("Unable to allocate memory for queue vectors\n");
- goto err_alloc_q_vectors;
- }
-
- err = ixgbe_alloc_queues(adapter);
- if (err) {
- e_dev_err("Unable to allocate memory for queues\n");
- goto err_alloc_queues;
- }
-
- e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
- (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
- adapter->num_rx_queues, adapter->num_tx_queues);
-
- set_bit(__IXGBE_DOWN, &adapter->state);
-
- return 0;
-
-err_alloc_queues:
- ixgbe_free_q_vectors(adapter);
-err_alloc_q_vectors:
- ixgbe_reset_interrupt_capability(adapter);
-err_set_interrupt:
- return err;
-}
-
-/**
- * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
- * @adapter: board private structure to clear interrupt scheme on
- *
- * We go through and clear interrupt specific resources and reset the structure
- * to pre-load conditions
- **/
-void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- kfree(adapter->tx_ring[i]);
- adapter->tx_ring[i] = NULL;
- }
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = adapter->rx_ring[i];
-
- /* ixgbe_get_stats64() might access this ring, we must wait
- * a grace period before freeing it.
- */
- kfree_rcu(ring, rcu);
- adapter->rx_ring[i] = NULL;
- }
-
- adapter->num_tx_queues = 0;
- adapter->num_rx_queues = 0;
-
- ixgbe_free_q_vectors(adapter);
- ixgbe_reset_interrupt_capability(adapter);
-}
-
-/**
* ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
* @adapter: board private structure to initialize
*
@@ -4956,7 +4351,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->subsystem_device_id = pdev->subsystem_device;
/* Set capability flags */
- rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
+ rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
adapter->ring_feature[RING_F_RSS].indices = rss;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
switch (hw->mac.type) {
@@ -5048,10 +4443,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->rx_itr_setting = 1;
adapter->tx_itr_setting = 1;
- /* set defaults for eitr in MegaBytes */
- adapter->eitr_low = 10;
- adapter->eitr_high = 20;
-
/* set default ring sizes */
adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
@@ -5065,12 +4456,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
return -EIO;
}
- /* enable rx csum by default */
- adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
-
- /* get assigned NUMA node */
- adapter->node = dev_to_node(&pdev->dev);
-
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
@@ -5085,10 +4470,16 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
+ int orig_node = dev_to_node(dev);
+ int numa_node = -1;
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
+
+ if (tx_ring->q_vector)
+ numa_node = tx_ring->q_vector->numa_node;
+
+ tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
if (!tx_ring->tx_buffer_info)
tx_ring->tx_buffer_info = vzalloc(size);
if (!tx_ring->tx_buffer_info)
@@ -5098,8 +4489,15 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
- &tx_ring->dma, GFP_KERNEL);
+ set_dev_node(dev, numa_node);
+ tx_ring->desc = dma_alloc_coherent(dev,
+ tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL);
+ set_dev_node(dev, orig_node);
+ if (!tx_ring->desc)
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
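
The Tx resource setup now prefers the NUMA node of the ring's q_vector and
falls back to an unconstrained allocation when no node is known or the
node-local attempt fails. A userspace sketch of that try-node-then-fall-back
pattern; alloc_on_node is a stand-in here, not a kernel or libnuma call:

#include <stdio.h>
#include <stdlib.h>

/* Toy node-local allocator: fails when no preferred node is known. */
static void *alloc_on_node(size_t size, int node)
{
	if (node < 0)
		return NULL;
	return malloc(size);	/* stand-in for a node-aware allocator */
}

int main(void)
{
	size_t size = 4096;
	int numa_node = -1;	/* unknown, e.g. no q_vector attached yet */
	void *buf;

	buf = alloc_on_node(size, numa_node);
	if (!buf)
		buf = malloc(size);	/* fall back to any node */
	if (!buf)
		return 1;
	printf("allocated %zu bytes via the %s path\n", size,
	       numa_node < 0 ? "fallback" : "node-local");
	free(buf);
	return 0;
}
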
@@ -5148,10 +4546,16 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
+ int orig_node = dev_to_node(dev);
+ int numa_node = -1;
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
+
+ if (rx_ring->q_vector)
+ numa_node = rx_ring->q_vector->numa_node;
+
+ rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
if (!rx_ring->rx_buffer_info)
rx_ring->rx_buffer_info = vzalloc(size);
if (!rx_ring->rx_buffer_info)
@@ -5161,15 +4565,23 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
- &rx_ring->dma, GFP_KERNEL);
-
+ set_dev_node(dev, numa_node);
+ rx_ring->desc = dma_alloc_coherent(dev,
+ rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL);
+ set_dev_node(dev, orig_node);
+ if (!rx_ring->desc)
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc)
goto err;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ ixgbe_init_rx_page_offset(rx_ring);
+
return 0;
err:
vfree(rx_ring->rx_buffer_info);
@@ -5289,20 +4701,24 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* MTU < 68 is an error and causes problems on some kernels */
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED &&
- hw->mac.type != ixgbe_mac_X540) {
- if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
- return -EINVAL;
- } else {
- if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
+ if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
+ return -EINVAL;
+
+ /*
+ * For 82599EB we cannot allow PF to change MTU greater than 1500
+ * in SR-IOV mode as it may cause buffer overruns in guest VFs that
+ * don't allocate and chain buffers correctly.
+ */
+ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
+ (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
+ (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
return -EINVAL;
- }
e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
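
The reworked MTU check derives max_frame from the MTU plus the Ethernet header
and FCS, rejects anything outside the 68-to-jumbo range, and additionally caps
an 82599 PF at a standard VLAN-tagged frame while SR-IOV VFs are active. A
small sketch of that arithmetic; the two limit values are assumptions standing
in for the driver's constants:

#include <stdio.h>

#define ETH_HDR		14
#define ETH_FCS		4
#define MAX_JUMBO	9728	/* assumed jumbo ceiling */
#define MAX_STD_VLAN	1522	/* assumed standard frame + VLAN tag */

static int mtu_ok(int new_mtu, int sriov_on_82599)
{
	int max_frame = new_mtu + ETH_HDR + ETH_FCS;

	if (new_mtu < 68 || max_frame > MAX_JUMBO)
		return 0;
	/* PF must stay at a standard frame while 82599 VFs are active. */
	if (sriov_on_82599 && max_frame > MAX_STD_VLAN)
		return 0;
	return 1;
}

int main(void)
{
	printf("MTU 1500 with VFs: %s\n", mtu_ok(1500, 1) ? "ok" : "rejected");
	printf("MTU 9000 with VFs: %s\n", mtu_ok(9000, 1) ? "ok" : "rejected");
	printf("MTU 9000 no VFs:   %s\n", mtu_ok(9000, 0) ? "ok" : "rejected");
	return 0;
}
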
@@ -5558,7 +4974,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
- u64 bytes = 0, packets = 0;
+ u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
#ifdef IXGBE_FCOE
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
unsigned int cpu;
@@ -5588,12 +5004,14 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
non_eop_descs += rx_ring->rx_stats.non_eop_descs;
alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+ hw_csum_rx_error += rx_ring->rx_stats.csum_err;
bytes += rx_ring->stats.bytes;
packets += rx_ring->stats.packets;
}
adapter->non_eop_descs = non_eop_descs;
adapter->alloc_rx_page_failed = alloc_rx_page_failed;
adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+ adapter->hw_csum_rx_error = hw_csum_rx_error;
netdev->stats.rx_bytes = bytes;
netdev->stats.rx_packets = packets;
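
The statistics pass above just walks every Rx ring and folds the per-ring
counters (including the new per-ring csum_err) into adapter-wide totals before
publishing them through netdev->stats. A tiny sketch of that aggregation over
an array of per-ring counters; the struct here is illustrative, not the
driver's:

#include <stdint.h>
#include <stdio.h>

struct ring_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t csum_err;
};

int main(void)
{
	struct ring_stats rings[3] = {
		{ 100, 150000, 1 }, { 250, 380000, 0 }, { 75, 9000, 2 },
	};
	uint64_t packets = 0, bytes = 0, csum_err = 0;
	unsigned int i;

	for (i = 0; i < 3; i++) {
		packets  += rings[i].packets;
		bytes    += rings[i].bytes;
		csum_err += rings[i].csum_err;
	}
	printf("rx: %llu packets, %llu bytes, %llu checksum errors\n",
	       (unsigned long long)packets, (unsigned long long)bytes,
	       (unsigned long long)csum_err);
	return 0;
}
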
@@ -5945,7 +5363,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
* print link down message
* @adapter - pointer to the adapter structure
**/
-static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter* adapter)
+static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
@@ -6190,41 +5608,32 @@ static void ixgbe_service_timer(unsigned long data)
unsigned long next_event_offset;
bool ready = true;
-#ifdef CONFIG_PCI_IOV
- ready = false;
+ /* poll faster when waiting for link */
+ if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
+ next_event_offset = HZ / 10;
+ else
+ next_event_offset = HZ * 2;
+#ifdef CONFIG_PCI_IOV
/*
* don't bother with SR-IOV VF DMA hang check if there are
* no VFs or the link is down
*/
if (!adapter->num_vfs ||
- (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) {
- ready = true;
+ (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
goto normal_timer_service;
- }
/* If we have VFs allocated then we must check for DMA hangs */
ixgbe_check_for_bad_vf(adapter);
next_event_offset = HZ / 50;
adapter->timer_event_accumulator++;
- if (adapter->timer_event_accumulator >= 100) {
- ready = true;
+ if (adapter->timer_event_accumulator >= 100)
adapter->timer_event_accumulator = 0;
- }
-
- goto schedule_event;
-
-normal_timer_service:
-#endif
- /* poll faster when waiting for link */
- if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
- next_event_offset = HZ / 10;
else
- next_event_offset = HZ * 2;
+ ready = false;
-#ifdef CONFIG_PCI_IOV
-schedule_event:
+normal_timer_service:
#endif
/* Reset the timer */
mod_timer(&adapter->service_timer, next_event_offset + jiffies);
@@ -6273,30 +5682,11 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
}
-void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
- u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
-{
- struct ixgbe_adv_tx_context_desc *context_desc;
- u16 i = tx_ring->next_to_use;
-
- context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-
- i++;
- tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
-
- /* set bits to identify this as an advanced context descriptor */
- type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
-
- context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
- context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
- context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-}
-
-static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, __be16 protocol, u8 *hdr_len)
+static int ixgbe_tso(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *first,
+ u8 *hdr_len)
{
- int err;
+ struct sk_buff *skb = first->skb;
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
@@ -6304,7 +5694,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
return 0;
if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
return err;
}
@@ -6312,7 +5702,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- if (protocol == __constant_htons(ETH_P_IP)) {
+ if (first->protocol == __constant_htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -6321,17 +5711,27 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
IPPROTO_TCP,
0);
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ first->tx_flags |= IXGBE_TX_FLAGS_TSO |
+ IXGBE_TX_FLAGS_CSUM |
+ IXGBE_TX_FLAGS_IPV4;
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
+ first->tx_flags |= IXGBE_TX_FLAGS_TSO |
+ IXGBE_TX_FLAGS_CSUM;
}
+ /* compute header lengths */
l4len = tcp_hdrlen(skb);
*hdr_len = skb_transport_offset(skb) + l4len;
+ /* update gso size and bytecount with header size */
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
/* mss_l4len_id: use 1 as index for TSO */
mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
@@ -6340,29 +5740,29 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = skb_network_header_len(skb);
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
- mss_l4len_idx);
+ mss_l4len_idx);
return 1;
}
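
ixgbe_tso now folds the segmentation accounting into the first tx_buffer: the
skb length already contains one copy of the headers, so every additional GSO
segment contributes hdr_len more bytes to the bytecount later handed to
netdev_tx_sent_queue(). A worked example of that arithmetic with hypothetical
sizes:

#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 64240;	/* hypothetical TSO skb, headers included */
	unsigned int hdr_len = 66;	/* eth + ip + tcp header bytes */
	unsigned int mss = 1448;
	unsigned int payload = skb_len - hdr_len;
	unsigned int gso_segs = (payload + mss - 1) / mss;

	/* One header is already in skb_len; each extra segment repeats it. */
	unsigned int bytecount = skb_len + (gso_segs - 1) * hdr_len;

	printf("%u segments, %u bytes on the wire\n", gso_segs, bytecount);
	return 0;
}
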
-static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags,
- __be16 protocol)
+static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *first)
{
+ struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
- if (!(tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
- !(tx_flags & IXGBE_TX_FLAGS_TXSW))
- return false;
+ if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
+ !(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
+ return;
} else {
u8 l4_hdr = 0;
- switch (protocol) {
+ switch (first->protocol) {
case __constant_htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
@@ -6376,7 +5776,7 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
"partial checksum but proto=%x!\n",
- skb->protocol);
+ first->protocol);
}
break;
}
@@ -6400,19 +5800,21 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
"partial checksum but l4 proto=%x!\n",
- skb->protocol);
+ l4_hdr);
}
break;
}
+
+ /* update TX checksum flag */
+ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
}
+ /* vlan_macip_lens: MACLEN, VLAN tag */
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
type_tucmd, mss_l4len_idx);
-
- return (skb->ip_summed == CHECKSUM_PARTIAL);
}
static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
@@ -6428,7 +5830,7 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
/* set segmentation enable bits for TSO/FSO */
#ifdef IXGBE_FCOE
- if ((tx_flags & IXGBE_TX_FLAGS_TSO) || (tx_flags & IXGBE_TX_FLAGS_FSO))
+ if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO))
#else
if (tx_flags & IXGBE_TX_FLAGS_TSO)
#endif
@@ -6437,200 +5839,192 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
return cmd_type;
}
-static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
+static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
+ u32 tx_flags, unsigned int paylen)
{
- __le32 olinfo_status =
- cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
-
- if (tx_flags & IXGBE_TX_FLAGS_TSO) {
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM |
- (1 << IXGBE_ADVTXD_IDX_SHIFT));
- /* enble IPv4 checksum for TSO */
- if (tx_flags & IXGBE_TX_FLAGS_IPV4)
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
- }
+ __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
/* enable L4 checksum for TSO and TX checksum offload */
if (tx_flags & IXGBE_TX_FLAGS_CSUM)
olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
-#ifdef IXGBE_FCOE
- /* use index 1 context for FCOE/FSO */
- if (tx_flags & IXGBE_TX_FLAGS_FCOE)
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC |
- (1 << IXGBE_ADVTXD_IDX_SHIFT));
+	/* enable IPv4 checksum for TSO */
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
+ /* use index 1 context for TSO/FSO/FCOE */
+#ifdef IXGBE_FCOE
+ if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FCOE))
+#else
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
#endif
+ olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+
/*
* Check Context must be set if Tx switch is enabled, which it
* always is for case where virtual functions are running
*/
+#ifdef IXGBE_FCOE
+ if (tx_flags & (IXGBE_TX_FLAGS_TXSW | IXGBE_TX_FLAGS_FCOE))
+#else
if (tx_flags & IXGBE_TX_FLAGS_TXSW)
+#endif
olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
- return olinfo_status;
+ tx_desc->read.olinfo_status = olinfo_status;
}
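
ixgbe_tx_olinfo_status packs the payload length and the per-packet option bits
into one little-endian word that is written directly into the descriptor. A
compilable sketch of that packing with invented shift and bit values (the real
ones come from ixgbe_type.h):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout, for illustration only. */
#define PAYLEN_SHIFT	14
#define POPTS_TXSM	(1u << 9)	/* L4 checksum inserted */
#define POPTS_IXSM	(1u << 8)	/* IPv4 checksum inserted */
#define IDX_SHIFT	4		/* context index field */

int main(void)
{
	uint32_t paylen = 1460, olinfo;
	int csum = 1, tso = 1, ipv4 = 1;

	olinfo = paylen << PAYLEN_SHIFT;
	if (csum)			/* L4 checksum offload (TSO sets this too) */
		olinfo |= POPTS_TXSM;
	if (ipv4)
		olinfo |= POPTS_IXSM;
	if (tso)			/* TSO/FSO use context index 1 */
		olinfo |= 1u << IDX_SHIFT;
	printf("olinfo_status = 0x%08x\n", (unsigned int)olinfo);
	return 0;
}
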
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
IXGBE_TXD_CMD_RS)
static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
- struct sk_buff *skb,
struct ixgbe_tx_buffer *first,
- u32 tx_flags,
const u8 hdr_len)
{
- struct device *dev = tx_ring->dev;
- struct ixgbe_tx_buffer *tx_buffer_info;
- union ixgbe_adv_tx_desc *tx_desc;
dma_addr_t dma;
- __le32 cmd_type, olinfo_status;
- struct skb_frag_struct *frag;
- unsigned int f = 0;
+ struct sk_buff *skb = first->skb;
+ struct ixgbe_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
- u32 offset = 0;
- u32 paylen = skb->len - hdr_len;
+ unsigned int paylen = skb->len - hdr_len;
+ u32 tx_flags = first->tx_flags;
+ __le32 cmd_type;
u16 i = tx_ring->next_to_use;
- u16 gso_segs;
+
+ tx_desc = IXGBE_TX_DESC(tx_ring, i);
+
+ ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen);
+ cmd_type = ixgbe_tx_cmd_type(tx_flags);
#ifdef IXGBE_FCOE
if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
- if (data_len >= sizeof(struct fcoe_crc_eof)) {
- data_len -= sizeof(struct fcoe_crc_eof);
- } else {
+ if (data_len < sizeof(struct fcoe_crc_eof)) {
size -= sizeof(struct fcoe_crc_eof) - data_len;
data_len = 0;
+ } else {
+ data_len -= sizeof(struct fcoe_crc_eof);
}
}
#endif
- dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma))
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
- cmd_type = ixgbe_tx_cmd_type(tx_flags);
- olinfo_status = ixgbe_tx_olinfo_status(tx_flags, paylen);
+ /* record length, and DMA address */
+ dma_unmap_len_set(first, len, size);
+ dma_unmap_addr_set(first, dma, dma);
- tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
for (;;) {
- while (size > IXGBE_MAX_DATA_PER_TXD) {
- tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
+ while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len =
cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
- tx_desc->read.olinfo_status = olinfo_status;
-
- offset += IXGBE_MAX_DATA_PER_TXD;
- size -= IXGBE_MAX_DATA_PER_TXD;
- tx_desc++;
i++;
+ tx_desc++;
if (i == tx_ring->count) {
- tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
+ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
i = 0;
}
+
+ dma += IXGBE_MAX_DATA_PER_TXD;
+ size -= IXGBE_MAX_DATA_PER_TXD;
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_desc->read.olinfo_status = 0;
}
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- tx_buffer_info->length = offset + size;
- tx_buffer_info->tx_flags = tx_flags;
- tx_buffer_info->dma = dma;
+ if (likely(!data_len))
+ break;
- tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
+ if (unlikely(skb->no_fcs))
+ cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS));
tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
- tx_desc->read.olinfo_status = olinfo_status;
- if (!data_len)
- break;
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
- frag = &skb_shinfo(skb)->frags[f];
#ifdef IXGBE_FCOE
size = min_t(unsigned int, data_len, skb_frag_size(frag));
#else
size = skb_frag_size(frag);
#endif
data_len -= size;
- f++;
-
- offset = 0;
- tx_flags |= IXGBE_TX_FLAGS_MAPPED_AS_PAGE;
- dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma))
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
- tx_desc++;
- i++;
- if (i == tx_ring->count) {
- tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
- i = 0;
- }
- }
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
- tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_desc->read.olinfo_status = 0;
- i++;
- if (i == tx_ring->count)
- i = 0;
-
- tx_ring->next_to_use = i;
+ frag++;
+ }
- if (tx_flags & IXGBE_TX_FLAGS_TSO)
- gso_segs = skb_shinfo(skb)->gso_segs;
-#ifdef IXGBE_FCOE
- /* adjust for FCoE Sequence Offload */
- else if (tx_flags & IXGBE_TX_FLAGS_FSO)
- gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
- skb_shinfo(skb)->gso_size);
-#endif /* IXGBE_FCOE */
- else
- gso_segs = 1;
+ /* write last descriptor with RS and EOP bits */
+ cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
+ tx_desc->read.cmd_type_len = cmd_type;
- /* multiply data chunks by size of headers */
- tx_buffer_info->bytecount = paylen + (gso_segs * hdr_len);
- tx_buffer_info->gso_segs = gso_segs;
- tx_buffer_info->skb = skb;
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
/* set the timestamp */
first->time_stamp = jiffies;
/*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
+ * Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch. (Only applicable for weak-ordered
+ * memory model archs, such as IA-64).
+ *
+ * We also need this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
*/
wmb();
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
/* notify HW of packet */
writel(i, tx_ring->tail);
return;
dma_error:
- dev_err(dev, "TX DMA map failed\n");
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
/* clear dma mappings for failed tx_buffer_info map */
for (;;) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
- if (tx_buffer_info == first)
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+ if (tx_buffer == first)
break;
if (i == 0)
i = tx_ring->count;
i--;
}
- dev_kfree_skb_any(skb);
-
tx_ring->next_to_use = i;
}
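
ixgbe_tx_map walks the ring with the usual increment-and-wrap index pattern
and splits any mapped buffer larger than the per-descriptor limit across
several descriptors before moving on to the next fragment. A standalone sketch
of that chunking and wrap logic, with assumed ring and limit sizes:

#include <stdio.h>

#define RING_SIZE		8
#define MAX_DATA_PER_TXD	16384u	/* assumed per-descriptor limit */

int main(void)
{
	unsigned int size = 40000;	/* one large mapped buffer */
	unsigned int i = 6;		/* next_to_use, near the end of the ring */

	while (size > MAX_DATA_PER_TXD) {
		printf("descriptor %u carries %u bytes\n", i, MAX_DATA_PER_TXD);
		size -= MAX_DATA_PER_TXD;
		if (++i == RING_SIZE)
			i = 0;		/* wrap exactly like the driver loop */
	}
	printf("descriptor %u carries the last %u bytes\n", i, size);
	return 0;
}
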
-static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
- u32 tx_flags, __be16 protocol)
+static void ixgbe_atr(struct ixgbe_ring *ring,
+ struct ixgbe_tx_buffer *first)
{
struct ixgbe_q_vector *q_vector = ring->q_vector;
union ixgbe_atr_hash_dword input = { .dword = 0 };
@@ -6654,16 +6048,16 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
ring->atr_count++;
/* snag network header to get L4 type and address */
- hdr.network = skb_network_header(skb);
+ hdr.network = skb_network_header(first->skb);
/* Currently only IPv4/IPv6 with TCP is supported */
- if ((protocol != __constant_htons(ETH_P_IPV6) ||
+ if ((first->protocol != __constant_htons(ETH_P_IPV6) ||
hdr.ipv6->nexthdr != IPPROTO_TCP) &&
- (protocol != __constant_htons(ETH_P_IP) ||
+ (first->protocol != __constant_htons(ETH_P_IP) ||
hdr.ipv4->protocol != IPPROTO_TCP))
return;
- th = tcp_hdr(skb);
+ th = tcp_hdr(first->skb);
/* skip this packet since it is invalid or the socket is closing */
if (!th || th->fin)
@@ -6676,7 +6070,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
/* reset sample count */
ring->atr_count = 0;
- vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
+ vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
/*
* src and dst are inverted, think how the receiver sees them
@@ -6691,13 +6085,13 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
* since src port and flex bytes occupy the same word XOR them together
* and write the value to source port portion of compressed dword
*/
- if (tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
+ if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
else
- common.port.src ^= th->dest ^ protocol;
+ common.port.src ^= th->dest ^ first->protocol;
common.port.dst ^= th->source;
- if (protocol == __constant_htons(ETH_P_IP)) {
+ if (first->protocol == __constant_htons(ETH_P_IP)) {
input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
} else {
@@ -6785,7 +6179,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
/*
* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
- * + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head,
* + 1 desc for context descriptor,
* otherwise try next time
@@ -6801,11 +6195,12 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
-#ifdef CONFIG_PCI_IOV
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- tx_flags |= IXGBE_TX_FLAGS_TXSW;
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
-#endif
/* if we have a HW VLAN tag being added default to the HW one */
if (vlan_tx_tag_present(skb)) {
tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
@@ -6818,10 +6213,20 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
goto out_drop;
protocol = vhdr->h_vlan_encapsulated_proto;
- tx_flags |= ntohs(vhdr->h_vlan_TCI) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
+ IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
}
+#ifdef CONFIG_PCI_IOV
+ /*
+	 * Use the l2switch_enable flag; it would be false if the DMA
+ * Tx switch had been disabled.
+ */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ tx_flags |= IXGBE_TX_FLAGS_TXSW;
+
+#endif
/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
@@ -6842,61 +6247,69 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
}
}
- /* record the location of the first descriptor for this packet */
- first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ /* record initial flags and protocol */
+ first->tx_flags = tx_flags;
+ first->protocol = protocol;
#ifdef IXGBE_FCOE
/* setup tx offload for FCoE */
if ((protocol == __constant_htons(ETH_P_FCOE)) &&
(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
- tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
+ tso = ixgbe_fso(tx_ring, first, &hdr_len);
if (tso < 0)
goto out_drop;
- else if (tso)
- tx_flags |= IXGBE_TX_FLAGS_FSO |
- IXGBE_TX_FLAGS_FCOE;
- else
- tx_flags |= IXGBE_TX_FLAGS_FCOE;
goto xmit_fcoe;
}
#endif /* IXGBE_FCOE */
- /* setup IPv4/IPv6 offloads */
- if (protocol == __constant_htons(ETH_P_IP))
- tx_flags |= IXGBE_TX_FLAGS_IPV4;
-
- tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
+ tso = ixgbe_tso(tx_ring, first, &hdr_len);
if (tso < 0)
goto out_drop;
- else if (tso)
- tx_flags |= IXGBE_TX_FLAGS_TSO;
- else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
- tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ else if (!tso)
+ ixgbe_tx_csum(tx_ring, first);
/* add the ATR filter if ATR is on */
if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
- ixgbe_atr(tx_ring, skb, tx_flags, protocol);
+ ixgbe_atr(tx_ring, first);
#ifdef IXGBE_FCOE
xmit_fcoe:
#endif /* IXGBE_FCOE */
- ixgbe_tx_map(tx_ring, skb, first, tx_flags, hdr_len);
+ ixgbe_tx_map(tx_ring, first, hdr_len);
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
out_drop:
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
+
return NETDEV_TX_OK;
}
-static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring;
+ if (skb->len <= 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /*
+ * The minimum packet size for olinfo paylen is 17 so pad the skb
+ * in order to meet this minimum size requirement.
+ */
+ if (skb->len < 17) {
+ if (skb_padto(skb, 17))
+ return NETDEV_TX_OK;
+ skb->len = 17;
+ }
+
tx_ring = adapter->tx_ring[skb->queue_mapping];
return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}
@@ -7029,8 +6442,8 @@ static void ixgbe_netpoll(struct net_device *netdev)
}
adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
}
-#endif
+#endif
static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -7079,6 +6492,7 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
return stats;
}
+#ifdef CONFIG_IXGBE_DCB
/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
* #adapter: pointer to ixgbe_adapter
* @tc: number of traffic classes currently enabled
@@ -7115,7 +6529,6 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
return;
}
-
/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
* classes.
*
@@ -7135,7 +6548,8 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
/* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
- (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS))
+ (hw->mac.type == ixgbe_mac_82598EB &&
+ tc < MAX_TRAFFIC_CLASS))
return -EINVAL;
/* Hardware has to reinitialize queues and interrupts to
@@ -7149,7 +6563,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
if (tc) {
netdev_set_num_tc(dev, tc);
adapter->last_lfc_mode = adapter->hw.fc.current_mode;
-
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -7157,7 +6570,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
adapter->hw.fc.requested_mode = ixgbe_fc_none;
} else {
netdev_reset_tc(dev);
-
adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
@@ -7175,6 +6587,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
return 0;
}
+#endif /* CONFIG_IXGBE_DCB */
void ixgbe_do_reset(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -7186,59 +6599,52 @@ void ixgbe_do_reset(struct net_device *netdev)
}
static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
- netdev_features_t data)
+ netdev_features_t features)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- data &= ~NETIF_F_HW_VLAN_RX;
+ features &= ~NETIF_F_HW_VLAN_RX;
#endif
/* return error if RXHASH is being enabled when RSS is not supported */
if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
- data &= ~NETIF_F_RXHASH;
+ features &= ~NETIF_F_RXHASH;
/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
- if (!(data & NETIF_F_RXCSUM))
- data &= ~NETIF_F_LRO;
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
- /* Turn off LRO if not RSC capable or invalid ITR settings */
- if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
- data &= ~NETIF_F_LRO;
- } else if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
- (adapter->rx_itr_setting != 1 &&
- adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) {
- data &= ~NETIF_F_LRO;
- e_info(probe, "rx-usecs set too low, not enabling RSC\n");
- }
+ /* Turn off LRO if not RSC capable */
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
+ features &= ~NETIF_F_LRO;
+
- return data;
+ return features;
}
static int ixgbe_set_features(struct net_device *netdev,
- netdev_features_t data)
+ netdev_features_t features)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ netdev_features_t changed = netdev->features ^ features;
bool need_reset = false;
- /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
- if (!(data & NETIF_F_RXCSUM))
- adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
- else
- adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
-
/* Make sure RSC matches LRO, reset if change */
- if (!!(data & NETIF_F_LRO) !=
- !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
- adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_X540:
- case ixgbe_mac_82599EB:
+ if (!(features & NETIF_F_LRO)) {
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
need_reset = true;
- break;
- default:
- break;
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+ } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
+ !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+ if (adapter->rx_itr_setting == 1 ||
+ adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ need_reset = true;
+ } else if ((changed ^ features) & NETIF_F_LRO) {
+ e_info(probe, "rx-usecs set too low, "
+ "disabling RSC\n");
}
}
@@ -7246,27 +6652,30 @@ static int ixgbe_set_features(struct net_device *netdev,
* Check if Flow Director n-tuple support was enabled or disabled. If
* the state changed, we need to reset.
*/
- if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
- /* turn off ATR, enable perfect filters and reset */
- if (data & NETIF_F_NTUPLE) {
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ if (!(features & NETIF_F_NTUPLE)) {
+ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
+ /* turn off Flow Director, set ATR and reset */
+ if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+ !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
need_reset = true;
}
- } else if (!(data & NETIF_F_NTUPLE)) {
- /* turn off Flow Director, set ATR and reset */
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
- !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ } else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ /* turn off ATR, enable perfect filters and reset */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
need_reset = true;
}
+ if (changed & NETIF_F_RXALL)
+ need_reset = true;
+
+ netdev->features = features;
if (need_reset)
ixgbe_do_reset(netdev);
return 0;
-
}
static const struct net_device_ops ixgbe_netdev_ops = {
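
The ndo_set_features rewrite above keys its decisions off the XOR of the old and new feature masks, so only bits that actually toggled can trigger a reset. The idiom in isolation (need_reset is assumed to be declared by the caller; NETIF_F_RXALL is just the example bit used in the hunk):

	/* Sketch: detect which offload bits actually toggled. */
	netdev_features_t changed = netdev->features ^ features;

	if (changed & NETIF_F_RXALL)
		need_reset = true;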
@@ -7274,7 +6683,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
.ndo_select_queue = ixgbe_select_queue,
- .ndo_set_rx_mode = ixgbe_set_rx_mode,
+ .ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac,
.ndo_change_mtu = ixgbe_change_mtu,
@@ -7285,10 +6694,12 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
.ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
- .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
+ .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
+#ifdef CONFIG_IXGBE_DCB
.ndo_setup_tc = ixgbe_setup_tc,
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
@@ -7306,7 +6717,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
};
static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
- const struct ixgbe_info *ii)
+ const struct ixgbe_info *ii)
{
#ifdef CONFIG_PCI_IOV
struct ixgbe_hw *hw = &adapter->hw;
@@ -7493,6 +6904,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
e_crit(probe, "Fan has stopped, replace the adapter\n");
}
+ if (allow_unsupported_sfp)
+ hw->allow_unsupported_sfp = allow_unsupported_sfp;
+
/* reset_hw fills in the perm_addr as well */
hw->phy.reset_if_overtemp = true;
err = hw->mac.ops.reset_hw(hw);
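
The probe hunk above copies a new allow_unsupported_sfp option into the hw struct; its declaration as a module parameter sits outside this excerpt. A hedged guess at what that declaration looks like (type, permissions and description string are assumptions, not quoted from the patch):

	static unsigned int allow_unsupported_sfp;
	module_param(allow_unsupported_sfp, uint, 0);
	MODULE_PARM_DESC(allow_unsupported_sfp,
			 "Allow unsupported and untested SFP+ modules");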
@@ -7537,6 +6951,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
break;
}
+ netdev->hw_features |= NETIF_F_RXALL;
+
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_IP_CSUM;
@@ -7544,6 +6960,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->vlan_features |= NETIF_F_SG;
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
@@ -7581,7 +6998,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
e_dev_err("The EEPROM Checksum Is Not Valid\n");
err = -EIO;
- goto err_eeprom;
+ goto err_sw_init;
}
memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
@@ -7590,11 +7007,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
e_dev_err("invalid MAC address\n");
err = -EIO;
- goto err_eeprom;
+ goto err_sw_init;
}
setup_timer(&adapter->service_timer, &ixgbe_service_timer,
- (unsigned long) adapter);
+ (unsigned long) adapter);
INIT_WORK(&adapter->service_task, ixgbe_service_task);
clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
@@ -7682,7 +7099,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
/* reset the hardware with the new settings */
err = hw->mac.ops.start_hw(hw);
-
if (err == IXGBE_ERR_EEPROM_VERSION) {
/* We are running on a pre-production device, log a warning */
e_dev_warn("This device is a pre-production adapter/LOM. "
@@ -7737,7 +7153,6 @@ err_register:
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
-err_eeprom:
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index b91773551a38..bf9f82f4b1ae 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -834,6 +834,7 @@ out:
**/
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
{
+ struct ixgbe_adapter *adapter = hw->back;
s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
u32 vendor_oui = 0;
enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
@@ -1068,9 +1069,16 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (hw->phy.type == ixgbe_phy_sfp_intel) {
status = 0;
} else {
- hw_dbg(hw, "SFP+ module not supported\n");
- hw->phy.type = ixgbe_phy_sfp_unsupported;
- status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ if (hw->allow_unsupported_sfp) {
+ e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.");
+ status = 0;
+ } else {
+ hw_dbg(hw,
+ "SFP+ module not supported\n");
+ hw->phy.type =
+ ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
}
} else {
status = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index b01ecb4d2bb1..88a58cb08569 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -258,7 +258,7 @@ static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
list_for_each(pos, &adapter->vf_mvs.l) {
entry = list_entry(pos, struct vf_macvlans, l);
- if (entry->free == false)
+ if (!entry->free)
hw->mac.ops.set_rar(hw, entry->rar_entry,
entry->vf_macvlan,
entry->vf, IXGBE_RAH_AV);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 9b95bef60970..8636e8344fc9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1021,14 +1021,16 @@
#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
/* MSCA Bit Masks */
@@ -2726,6 +2728,8 @@ struct ixgbe_mac_operations {
s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
s32 (*setup_sfp)(struct ixgbe_hw *);
+ s32 (*disable_rx_buff)(struct ixgbe_hw *);
+ s32 (*enable_rx_buff)(struct ixgbe_hw *);
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
void (*release_swfw_sync)(struct ixgbe_hw *, u16);
@@ -2892,6 +2896,7 @@ struct ixgbe_hw {
u8 revision_id;
bool adapter_stopped;
bool force_full_reset;
+ bool allow_unsupported_sfp;
};
struct ixgbe_info {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index f838a2be8cfb..97a991403bbd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -760,7 +760,7 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
* This will be reversed when we stop the blinking.
*/
hw->mac.ops.check_link(hw, &speed, &link_up, false);
- if (link_up == false) {
+ if (!link_up) {
macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
@@ -847,6 +847,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
.release_swfw_sync = &ixgbe_release_swfw_sync_X540,
+ .disable_rx_buff = &ixgbe_disable_rx_buff_generic,
+ .enable_rx_buff = &ixgbe_enable_rx_buff_generic,
};
static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index e51d552410ae..581c65976bb4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2199,13 +2199,17 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
if (err) {
dev_info(&pdev->dev,
"PF still in reset state, assigning new address\n");
- dev_hw_addr_random(adapter->netdev, hw->mac.addr);
+ eth_hw_addr_random(adapter->netdev);
+ memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
+ adapter->netdev->addr_len);
} else {
err = hw->mac.ops.init_hw(hw);
if (err) {
pr_err("init_shared_code failed: %d\n", err);
goto out;
}
+ memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
+ adapter->netdev->addr_len);
}
/* Enable dynamic interrupt throttling rates */
@@ -2224,6 +2228,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
set_bit(__IXGBEVF_DOWN, &adapter->state);
+ return 0;
out:
return err;
@@ -2521,12 +2526,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
rx_ring->rx_buffer_info = vzalloc(size);
- if (!rx_ring->rx_buffer_info) {
- hw_dbg(&adapter->hw,
- "Unable to vmalloc buffer memory for "
- "the receive descriptor ring\n");
+ if (!rx_ring->rx_buffer_info)
goto alloc_failed;
- }
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
@@ -3398,6 +3399,17 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
/* setup the private structure */
err = ixgbevf_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ /* The HW MAC address was set and/or determined in sw_init */
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ pr_err("invalid MAC address\n");
+ err = -EIO;
+ goto err_sw_init;
+ }
netdev->hw_features = NETIF_F_SG |
NETIF_F_IP_CSUM |
@@ -3422,16 +3434,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
- /* The HW MAC address was set and/or determined in sw_init */
- memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
-
- if (!is_valid_ether_addr(netdev->dev_addr)) {
- pr_err("invalid MAC address\n");
- err = -EIO;
- goto err_sw_init;
- }
-
init_timer(&adapter->watchdog_timer);
adapter->watchdog_timer.function = ixgbevf_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -3460,13 +3462,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
ixgbevf_init_last_counter_stats(adapter);
/* print the MAC address */
- hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
- netdev->dev_addr[0],
- netdev->dev_addr[1],
- netdev->dev_addr[2],
- netdev->dev_addr[3],
- netdev->dev_addr[4],
- netdev->dev_addr[5]);
+ hw_dbg(hw, "%pM\n", netdev->dev_addr);
hw_dbg(hw, "MAC: %d\n", hw->mac.type);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 55cbf65512c3..4ea6580d3ae8 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2991,7 +2991,6 @@ jme_init_one(struct pci_dev *pdev,
*/
netdev = alloc_etherdev(sizeof(*jme));
if (!netdev) {
- pr_err("Cannot allocate netdev structure\n");
rc = -ENOMEM;
goto err_out_release_regions;
}
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 6ad094f176f8..f30db1c46600 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1108,10 +1108,9 @@ static int korina_probe(struct platform_device *pdev)
int rc;
dev = alloc_etherdev(sizeof(struct korina_private));
- if (!dev) {
- printk(KERN_ERR DRV_NAME ": alloc_etherdev failed\n");
+ if (!dev)
return -ENOMEM;
- }
+
SET_NETDEV_DEV(dev, &pdev->dev);
lp = netdev_priv(dev);
@@ -1150,7 +1149,6 @@ static int korina_probe(struct platform_device *pdev)
lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
if (!lp->td_ring) {
- printk(KERN_ERR DRV_NAME ": cannot allocate descriptors\n");
rc = -ENXIO;
goto probe_err_td_ring;
}
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 85e2c6cd9708..5dc9cbd51514 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -114,7 +114,7 @@ struct ltq_etop_priv {
static int
ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
{
- ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
+ ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
if (!ch->skb[ch->dma.desc])
return -ENOMEM;
ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
@@ -634,6 +634,7 @@ ltq_etop_init(struct net_device *dev)
struct ltq_etop_priv *priv = netdev_priv(dev);
struct sockaddr mac;
int err;
+ bool random_mac = false;
ether_setup(dev);
dev->watchdog_timeo = 10 * HZ;
@@ -646,11 +647,17 @@ ltq_etop_init(struct net_device *dev)
if (!is_valid_ether_addr(mac.sa_data)) {
pr_warn("etop: invalid MAC, using random\n");
random_ether_addr(mac.sa_data);
+ random_mac = true;
}
err = ltq_etop_set_mac_address(dev, &mac);
if (err)
goto err_netdev;
+
+ /* Set addr_assign_type here, ltq_etop_set_mac_address would reset it. */
+ if (random_mac)
+ dev->addr_assign_type |= NET_ADDR_RANDOM;
+
ltq_etop_set_multicast_list(dev);
err = ltq_etop_mdio_init(dev);
if (err)
@@ -731,6 +738,10 @@ ltq_etop_probe(struct platform_device *pdev)
}
dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_out;
+ }
strcpy(dev->name, "eth%d");
dev->netdev_ops = &ltq_eth_netdev_ops;
dev->ethtool_ops = &ltq_etop_ethtool_ops;
@@ -792,7 +803,7 @@ init_ltq_etop(void)
int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);
if (ret)
- pr_err("ltq_etop: Error registering platfom driver!");
+ pr_err("ltq_etop: Error registering platform driver!");
return ret;
}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 9edecfa1f0f4..75af1afe46c8 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -667,7 +667,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
skb = __skb_dequeue(&mp->rx_recycle);
if (skb == NULL)
- skb = dev_alloc_skb(mp->skb_size);
+ skb = netdev_alloc_skb(mp->dev, mp->skb_size);
if (skb == NULL) {
mp->oom = 1;
@@ -1832,7 +1832,7 @@ static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
struct sockaddr *sa = addr;
if (!is_valid_ether_addr(sa->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 953ba5851f7b..45a6333588e6 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -220,7 +220,6 @@ struct pxa168_eth_private {
u8 work_todo;
int skb_size;
- struct net_device_stats stats;
/* Size of Tx Ring per queue */
int tx_ring_size;
/* Number of tx descriptors in use */
@@ -350,7 +349,7 @@ static void rxq_refill(struct net_device *dev)
while (pep->rx_desc_count < pep->rx_ring_size) {
int size;
- skb = dev_alloc_skb(pep->skb_size);
+ skb = netdev_alloc_skb(dev, pep->skb_size);
if (!skb)
break;
if (SKB_DMA_REALIGN)
@@ -627,8 +626,9 @@ static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
unsigned char oldMac[ETH_ALEN];
if (!is_valid_ether_addr(sa->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
memcpy(oldMac, dev->dev_addr, ETH_ALEN);
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
netif_addr_lock_bh(dev);
update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
@@ -1017,10 +1017,9 @@ static int rxq_init(struct net_device *dev)
/* Allocate RX skb rings */
pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
GFP_KERNEL);
- if (!pep->rx_skb) {
- printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
+ if (!pep->rx_skb)
return -ENOMEM;
- }
+
/* Allocate RX ring */
pep->rx_desc_count = 0;
size = pep->rx_ring_size * sizeof(struct rx_desc);
@@ -1081,10 +1080,9 @@ static int txq_init(struct net_device *dev)
pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
GFP_KERNEL);
- if (!pep->tx_skb) {
- printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
+ if (!pep->tx_skb)
return -ENOMEM;
- }
+
/* Allocate TX ring */
pep->tx_desc_count = 0;
size = pep->tx_ring_size * sizeof(struct tx_desc);
@@ -1522,7 +1520,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME);
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
pep->pd = pdev->dev.platform_data;
pep->rx_ring_size = NUM_RX_DESCS;
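
The pxa168 hunks above show both halves of the random-MAC bookkeeping introduced across this merge: eth_hw_addr_random() assigns a random address and marks it NET_ADDR_RANDOM in one call, while the set_mac_address callback clears the flag once userspace installs a real address. Sketched in isolation (error handling elided):

	/* Probe path: no hardware address available, invent one. */
	eth_hw_addr_random(dev);		/* also sets NET_ADDR_RANDOM */

	/* .ndo_set_mac_address path: a real address replaces it. */
	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);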
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 33947ac595c0..5a30bf823099 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3807,10 +3807,8 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
struct skge_port *skge;
struct net_device *dev = alloc_etherdev(sizeof(*skge));
- if (!dev) {
- dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
+ if (!dev)
return NULL;
- }
SET_NETDEV_DEV(dev, &hw->pdev->dev);
dev->netdev_ops = &skge_netdev_ops;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 760c2b17dfd3..82c2c86a1951 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4700,10 +4700,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
struct sky2_port *sky2;
struct net_device *dev = alloc_etherdev(sizeof(*sky2));
- if (!dev) {
- dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
+ if (!dev)
return NULL;
- }
SET_NETDEV_DEV(dev, &hw->pdev->dev);
dev->irq = hw->pdev->irq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index eaf09d4f02d0..773c70ea3f62 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -239,6 +239,7 @@ static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
{
struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
struct mlx4_cmd_context *context;
+ unsigned long end;
int err = 0;
down(&cmd->event_sem);
@@ -268,6 +269,14 @@ static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
}
out:
+ /* wait for comm channel ready
+	 * this is necessary to prevent a race
+	 * when switching between event and polling modes
+ */
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (comm_pending(dev) && time_before(jiffies, end))
+ cond_resched();
+
spin_lock(&cmd->context_lock);
context->next = cmd->free_head;
cmd->free_head = context - cmd->context;
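
The hunk above drains the comm channel before recycling the command context, bounding the wait by the caller's timeout. The idiom on its own, with still_pending() standing in as a placeholder for whatever condition is being polled:

	unsigned long end = jiffies + msecs_to_jiffies(timeout);

	/* Poll until the condition clears or the deadline passes,
	 * yielding between checks instead of spinning.
	 */
	while (still_pending() && time_before(jiffies, end))
		cond_resched();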
@@ -1314,7 +1323,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
down(&priv->cmd.slave_sem);
if (mlx4_master_process_vhcr(dev, slave, NULL)) {
mlx4_err(dev, "Failed processing vhcr for slave:%d,"
- " reseting slave.\n", slave);
+ " resetting slave.\n", slave);
up(&priv->cmd.slave_sem);
goto reset_slave;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 149e60da0a32..31b455a49273 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1048,10 +1048,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
prof->tx_ring_num, prof->rx_ring_num);
- if (dev == NULL) {
- mlx4_err(mdev, "Net device allocation failed\n");
+ if (dev == NULL)
return -ENOMEM;
- }
SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
dev->dev_id = port - 1;
@@ -1064,6 +1062,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
memset(priv, 0, sizeof(struct mlx4_en_priv));
priv->dev = dev;
priv->mdev = mdev;
+ priv->ddev = &mdev->pdev->dev;
priv->prof = prof;
priv->port = port;
priv->port_up = false;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index d4ad8c226b51..9adbd53da525 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -48,7 +48,6 @@ static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
struct mlx4_en_rx_alloc *ring_alloc,
int i)
{
- struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
struct page *page;
@@ -72,7 +71,7 @@ static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
skb_frags[i].offset = page_alloc->offset;
page_alloc->offset += frag_info->frag_stride;
}
- dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
+ dma = dma_map_single(priv->ddev, page_address(skb_frags[i].page) +
skb_frags[i].offset, frag_info->frag_size,
PCI_DMA_FROMDEVICE);
rx_desc->data[i].addr = cpu_to_be64(dma);
@@ -186,7 +185,6 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
int index)
{
- struct mlx4_en_dev *mdev = priv->mdev;
struct page_frag *skb_frags;
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
dma_addr_t dma;
@@ -198,7 +196,7 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
dma = be64_to_cpu(rx_desc->data[nr].addr);
en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
- pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+ dma_unmap_single(priv->ddev, dma, skb_frags[nr].size,
PCI_DMA_FROMDEVICE);
put_page(skb_frags[nr].page);
}
@@ -285,10 +283,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct skb_frag_struct));
ring->rx_info = vmalloc(tmp);
- if (!ring->rx_info) {
- en_err(priv, "Failed allocating rx_info ring\n");
+ if (!ring->rx_info)
return -ENOMEM;
- }
+
en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
ring->rx_info, tmp);
@@ -413,7 +410,6 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
int length)
{
struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
- struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_frag_info *frag_info;
int nr;
dma_addr_t dma;
@@ -436,7 +432,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
goto fail;
/* Unmap buffer */
- pci_unmap_single(mdev->pdev, dma, skb_frag_size(&skb_frags_rx[nr]),
+ dma_unmap_single(priv->ddev, dma, skb_frag_size(&skb_frags_rx[nr]),
PCI_DMA_FROMDEVICE);
}
/* Adjust size of last fragment to match actual length */
@@ -462,18 +458,16 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
struct mlx4_en_rx_alloc *page_alloc,
unsigned int length)
{
- struct mlx4_en_dev *mdev = priv->mdev;
struct sk_buff *skb;
void *va;
int used_frags;
dma_addr_t dma;
- skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
+ skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
if (!skb) {
en_dbg(RX_ERR, priv, "Failed allocating skb\n");
return NULL;
}
- skb->dev = priv->dev;
skb_reserve(skb, NET_IP_ALIGN);
skb->len = length;
@@ -485,10 +479,10 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
/* We are copying all relevant data to the skb - temporarily
* synch buffers for the copy */
dma = be64_to_cpu(rx_desc->data[0].addr);
- dma_sync_single_for_cpu(&mdev->pdev->dev, dma, length,
+ dma_sync_single_for_cpu(priv->ddev, dma, length,
DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, va, length);
- dma_sync_single_for_device(&mdev->pdev->dev, dma, length,
+ dma_sync_single_for_device(priv->ddev, dma, length,
DMA_FROM_DEVICE);
skb->tail += length;
} else {
@@ -916,7 +910,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
rss_context->flags = rss_mask;
rss_context->hash_fn = MLX4_RSS_HASH_TOP;
for (i = 0; i < 10; i++)
- rss_context->rss_key[i] = rsskey[i];
+ rss_context->rss_key[i] = cpu_to_be32(rsskey[i]);
err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
&rss_map->indir_qp, &rss_map->indir_state);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 9ef9038d0629..17968244c399 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -71,16 +71,14 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
tmp = size * sizeof(struct mlx4_en_tx_info);
ring->tx_info = vmalloc(tmp);
- if (!ring->tx_info) {
- en_err(priv, "Failed allocating tx_info ring\n");
+ if (!ring->tx_info)
return -ENOMEM;
- }
+
en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
ring->tx_info, tmp);
ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
if (!ring->bounce_buf) {
- en_err(priv, "Failed allocating bounce buffer\n");
err = -ENOMEM;
goto err_tx;
}
@@ -200,7 +198,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int index, u8 owner)
{
- struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
@@ -216,7 +213,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
if (!tx_info->inl) {
if (tx_info->linear) {
- pci_unmap_single(mdev->pdev,
+ dma_unmap_single(priv->ddev,
(dma_addr_t) be64_to_cpu(data->addr),
be32_to_cpu(data->byte_count),
PCI_DMA_TODEVICE);
@@ -225,7 +222,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
for (i = 0; i < frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
- pci_unmap_page(mdev->pdev,
+ dma_unmap_page(priv->ddev,
(dma_addr_t) be64_to_cpu(data[i].addr),
skb_frag_size(frag), PCI_DMA_TODEVICE);
}
@@ -243,7 +240,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
}
if (tx_info->linear) {
- pci_unmap_single(mdev->pdev,
+ dma_unmap_single(priv->ddev,
(dma_addr_t) be64_to_cpu(data->addr),
be32_to_cpu(data->byte_count),
PCI_DMA_TODEVICE);
@@ -255,7 +252,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
if ((void *) data >= end)
data = ring->buf;
frag = &skb_shinfo(skb)->frags[i];
- pci_unmap_page(mdev->pdev,
+ dma_unmap_page(priv->ddev,
(dma_addr_t) be64_to_cpu(data->addr),
skb_frag_size(frag), PCI_DMA_TODEVICE);
++data;
@@ -587,7 +584,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
return skb_tx_hash(dev, skb);
}
-static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
+static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
{
__iowrite64_copy(dst, src, bytecnt / 8);
}
@@ -603,8 +600,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
struct skb_frag_struct *frag;
struct mlx4_en_tx_info *tx_info;
struct ethhdr *ethh;
- u64 mac;
- u32 mac_l, mac_h;
int tx_ind = 0;
int nr_txbb;
int desc_size;
@@ -689,16 +684,9 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Copy dst mac address to wqe */
- skb_reset_mac_header(skb);
- ethh = eth_hdr(skb);
- if (ethh && ethh->h_dest) {
- mac = mlx4_en_mac_to_u64(ethh->h_dest);
- mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
- mac_l = (u32) (mac & 0xffffffff);
- tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
- tx_desc->ctrl.imm = cpu_to_be32(mac_l);
- }
-
+ ethh = (struct ethhdr *)skb->data;
+ tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
+ tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
/* Handle LSO (TSO) packets */
if (lso_header_size) {
/* Mark opcode as LSO */
@@ -744,7 +732,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* Map fragments */
for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
frag = &skb_shinfo(skb)->frags[i];
- dma = skb_frag_dma_map(&mdev->dev->pdev->dev, frag,
+ dma = skb_frag_dma_map(priv->ddev, frag,
0, skb_frag_size(frag),
DMA_TO_DEVICE);
data->addr = cpu_to_be64(dma);
@@ -756,7 +744,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* Map linear part */
if (tx_info->linear) {
- dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
+ dma = dma_map_single(priv->ddev, skb->data + lso_header_size,
skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
data->addr = cpu_to_be64(dma);
data->lkey = cpu_to_be32(mdev->mr.key);
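
The TX hunks above move mlx4_en from the pci_map_*/pci_unmap_* wrappers on mdev->pdev to the generic DMA API on the cached priv->ddev pointer. A sketch of the converted mapping step, adding the dma_mapping_error() check that this excerpt itself does not show (the drop-on-failure policy is illustrative, not taken from the patch):

	dma_addr_t dma;

	dma = dma_map_single(priv->ddev, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->ddev, dma)) {
		/* Illustrative policy: drop the frame on mapping failure. */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}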
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 9ea7cabcaf3c..2a02ba522e60 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -685,16 +685,6 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
-int mlx4_QUERY_PORT(struct mlx4_dev *dev, void *ptr, u8 port)
-{
- struct mlx4_cmd_mailbox *outbox = ptr;
-
- return mlx4_cmd_box(dev, 0, outbox->dma, port, 0,
- MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_WRAPPED);
-}
-EXPORT_SYMBOL_GPL(mlx4_QUERY_PORT);
-
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
struct mlx4_cmd_mailbox *mailbox;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d498f049c74e..5f15014713bc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1545,13 +1545,11 @@ static int mlx4_init_steering(struct mlx4_dev *dev)
if (!priv->steer)
return -ENOMEM;
- for (i = 0; i < num_entries; i++) {
+ for (i = 0; i < num_entries; i++)
for (j = 0; j < MLX4_NUM_STEERS; j++) {
INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
}
- INIT_LIST_HEAD(&priv->steer[i].high_prios);
- }
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index ca574d850b39..4799e824052f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -562,14 +562,14 @@ out_mutex:
*/
static int find_entry(struct mlx4_dev *dev, u8 port,
u8 *gid, enum mlx4_protocol prot,
- enum mlx4_steer_type steer,
struct mlx4_cmd_mailbox *mgm_mailbox,
- u16 *hash, int *prev, int *index)
+ int *prev, int *index)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm = mgm_mailbox->buf;
u8 *mgid;
int err;
+ u16 hash;
u8 op_mod = (prot == MLX4_PROT_ETH) ?
!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;
@@ -580,15 +580,15 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
memcpy(mgid, gid, 16);
- err = mlx4_GID_HASH(dev, mailbox, hash, op_mod);
+ err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
mlx4_free_cmd_mailbox(dev, mailbox);
if (err)
return err;
if (0)
- mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash);
+ mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash);
- *index = *hash;
+ *index = hash;
*prev = -1;
do {
@@ -597,7 +597,7 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
return err;
if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
- if (*index != *hash) {
+ if (*index != hash) {
mlx4_err(dev, "Found zero MGID in AMGM.\n");
err = -EINVAL;
}
@@ -624,7 +624,6 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm;
u32 members_count;
- u16 hash;
int index, prev;
int link = 0;
int i;
@@ -638,8 +637,8 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
mgm = mailbox->buf;
mutex_lock(&priv->mcg_table.mutex);
- err = find_entry(dev, port, gid, prot, steer,
- mailbox, &hash, &prev, &index);
+ err = find_entry(dev, port, gid, prot,
+ mailbox, &prev, &index);
if (err)
goto out;
@@ -733,7 +732,6 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm;
u32 members_count;
- u16 hash;
int prev, index;
int i, loc;
int err;
@@ -747,8 +745,8 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
mutex_lock(&priv->mcg_table.mutex);
- err = find_entry(dev, port, gid, prot, steer,
- mailbox, &hash, &prev, &index);
+ err = find_entry(dev, port, gid, prot,
+ mailbox, &prev, &index);
if (err)
goto out;
@@ -872,44 +870,36 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot)
{
- enum mlx4_steer_type steer;
-
- steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
-
if (prot == MLX4_PROT_ETH &&
!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
if (prot == MLX4_PROT_ETH)
- gid[7] |= (steer << 1);
+ gid[7] |= (MLX4_MC_STEER << 1);
if (mlx4_is_mfunc(dev))
return mlx4_QP_ATTACH(dev, qp, gid, 1,
block_mcast_loopback, prot);
return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
- prot, steer);
+ prot, MLX4_MC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
enum mlx4_protocol prot)
{
- enum mlx4_steer_type steer;
-
- steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
-
if (prot == MLX4_PROT_ETH &&
!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
if (prot == MLX4_PROT_ETH)
- gid[7] |= (steer << 1);
+ gid[7] |= (MLX4_MC_STEER << 1);
if (mlx4_is_mfunc(dev))
return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
- return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
+ return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_MC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 28f8251561f4..5da51b99dbb8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -703,7 +703,6 @@ struct mlx4_msix_ctl {
struct mlx4_steer {
struct list_head promisc_qps[MLX4_NUM_STEERS];
struct list_head steer_entries[MLX4_NUM_STEERS];
- struct list_head high_prios;
};
struct mlx4_priv {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d60335f3c473..9e2b911a1230 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -453,7 +453,7 @@ struct mlx4_en_priv {
int base_qpn;
struct mlx4_en_rss_map rss_map;
- u32 ctrl_flags;
+ __be32 ctrl_flags;
u32 flags;
#define MLX4_EN_FLAG_PROMISC 0x1
#define MLX4_EN_FLAG_MC_PROMISC 0x2
@@ -482,6 +482,7 @@ struct mlx4_en_priv {
struct mlx4_en_stat_out_mbox hw_stats;
int vids[128];
bool wol;
+ struct device *ddev;
};
enum mlx4_en_wol {
@@ -552,10 +553,6 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);
-int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
- u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
-int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
- u8 promisc);
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 25a80d71fb2a..32e2b6616f3d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -304,30 +304,7 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
-int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align,
- u32 *base_mridx)
-{
- struct mlx4_priv *priv = mlx4_priv(dev);
- u32 mridx;
-
- mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
- if (mridx == -1)
- return -ENOMEM;
-
- *base_mridx = mridx;
- return 0;
-
-}
-EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);
-
-void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
-{
- struct mlx4_priv *priv = mlx4_priv(dev);
- mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
-}
-EXPORT_SYMBOL_GPL(mlx4_mr_release_range);
-
-int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
+static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
u64 iova, u64 size, u32 access, int npages,
int page_shift, struct mlx4_mr *mr)
{
@@ -340,7 +317,6 @@ int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}
-EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);
static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *mailbox,
@@ -457,7 +433,7 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
-void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
+static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
int err;
@@ -472,7 +448,6 @@ void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
}
mlx4_mtt_cleanup(dev, &mr->mtt);
}
-EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
@@ -852,46 +827,6 @@ err_free:
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
-int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
- u32 pd, u32 access, int max_pages,
- int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
-{
- struct mlx4_priv *priv = mlx4_priv(dev);
- int err = -ENOMEM;
-
- if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
- return -EINVAL;
-
- /* All MTTs must fit in the same page */
- if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
- return -EINVAL;
-
- fmr->page_shift = page_shift;
- fmr->max_pages = max_pages;
- fmr->max_maps = max_maps;
- fmr->maps = 0;
-
- err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
- page_shift, &fmr->mr);
- if (err)
- return err;
-
- fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
- fmr->mr.mtt.offset,
- &fmr->dma_handle);
- if (!fmr->mtts) {
- err = -ENOMEM;
- goto err_free;
- }
-
- return 0;
-
-err_free:
- mlx4_mr_free_reserved(dev, &fmr->mr);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);
-
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -954,18 +889,6 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);
-int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
-{
- if (fmr->maps)
- return -EBUSY;
-
- mlx4_mr_free_reserved(dev, &fmr->mr);
- fmr->mr.enabled = MLX4_MR_DISABLED;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);
-
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index f44ae555bf43..98e776261ead 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -79,15 +79,15 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
struct mlx4_qp qp;
u8 gid[16] = {0};
+ __be64 be_mac;
int err;
qp.qpn = *qpn;
mac &= 0xffffffffffffULL;
- mac = cpu_to_be64(mac << 16);
- memcpy(&gid[10], &mac, ETH_ALEN);
+ be_mac = cpu_to_be64(mac << 16);
+ memcpy(&gid[10], &be_mac, ETH_ALEN);
gid[5] = port;
- gid[7] = MLX4_UC_STEER << 1;
err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
if (err)
@@ -101,13 +101,13 @@ static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
{
struct mlx4_qp qp;
u8 gid[16] = {0};
+ __be64 be_mac;
qp.qpn = qpn;
mac &= 0xffffffffffffULL;
- mac = cpu_to_be64(mac << 16);
- memcpy(&gid[10], &mac, ETH_ALEN);
+ be_mac = cpu_to_be64(mac << 16);
+ memcpy(&gid[10], &be_mac, ETH_ALEN);
gid[5] = port;
- gid[7] = MLX4_UC_STEER << 1;
mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
}
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index ab81c0dc96e2..dccae1d1743a 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -278,7 +278,8 @@ ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
if (!ksp->rx_buffers[buff_n].skb) {
- struct sk_buff *skb = dev_alloc_skb(MAX_RXBUF_SIZE);
+ struct sk_buff *skb =
+ netdev_alloc_skb(ksp->ndev, MAX_RXBUF_SIZE);
dma_addr_t mapping;
ksp->rx_buffers[buff_n].skb = skb;
@@ -299,7 +300,6 @@ ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
break;
}
ksp->rx_buffers[buff_n].dma_ptr = mapping;
- skb->dev = ksp->ndev;
ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;
/* Record this into the DMA ring */
@@ -1362,10 +1362,8 @@ ks8695_probe(struct platform_device *pdev)
/* Initialise a net_device */
ndev = alloc_etherdev(sizeof(struct ks8695_priv));
- if (!ndev) {
- dev_err(&pdev->dev, "could not allocate device.\n");
+ if (!ndev)
return -ENOMEM;
- }
SET_NETDEV_DEV(ndev, &pdev->dev);
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 0a85690a1321..0686b93f1857 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1080,6 +1080,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(netdev->dev_addr, mac, netdev->addr_len);
ks8842_write_mac_addr(adapter, mac);
@@ -1211,7 +1212,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
ks8842_read_mac_addr(adapter, netdev->dev_addr);
if (!is_valid_ether_addr(netdev->dev_addr))
- random_ether_addr(netdev->dev_addr);
+ eth_hw_addr_random(netdev);
}
id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 0c3e4005224d..c722aa607d07 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -1,4 +1,4 @@
-/* drivers/net/ks8851.c
+/* drivers/net/ethernet/micrel/ks8851.c
*
* Copyright 2009 Simtec Electronics
* http://www.simtec.co.uk/
@@ -439,13 +439,13 @@ static void ks8851_init_mac(struct ks8851_net *ks)
dev->dev_addr);
}
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
ks8851_write_mac_addr(dev);
}
/**
* ks8851_irq - device interrupt handler
- * @irq: Interrupt number passed from the IRQ hnalder.
+ * @irq: Interrupt number passed from the IRQ handler.
* @pw: The private word passed to register_irq(), our struct ks8851_net.
*
* Disable the interrupt from happening again until we've processed the
@@ -1050,6 +1050,7 @@ static int ks8851_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
return ks8851_write_mac_addr(dev);
}
@@ -1419,10 +1420,8 @@ static int __devinit ks8851_probe(struct spi_device *spi)
int ret;
ndev = alloc_etherdev(sizeof(struct ks8851_net));
- if (!ndev) {
- dev_err(&spi->dev, "failed to alloc ethernet device\n");
+ if (!ndev)
return -ENOMEM;
- }
spi->bits_per_word = 8;
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index b0fae86aacad..852256ef1f22 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -1,4 +1,4 @@
-/* drivers/net/ks8851.h
+/* drivers/net/ethernet/micrel/ks8851.h
*
* Copyright 2009 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 2784bc706f1e..b8104d9f4081 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1,5 +1,5 @@
/**
- * drivers/net/ks8851_mll.c
+ * drivers/net/ethernet/micrel/ks8851_mll.c
* Copyright (c) 2009 Micrel Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -794,7 +794,7 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
frame_hdr = ks->frame_head_info;
while (ks->frame_cnt--) {
- skb = dev_alloc_skb(frame_hdr->len + 16);
+ skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
(frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
skb_reserve(skb, 2);
@@ -837,7 +837,7 @@ static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
/**
* ks_irq - device interrupt handler
- * @irq: Interrupt number passed from the IRQ hnalder.
+ * @irq: Interrupt number passed from the IRQ handler.
* @pw: The private word passed to register_irq(), our struct ks_net.
*
* This is the handler invoked to find out what happened
@@ -1239,6 +1239,7 @@ static int ks_set_mac_address(struct net_device *netdev, void *paddr)
struct sockaddr *addr = paddr;
u8 *da;
+ netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
da = (u8 *)netdev->dev_addr;
@@ -1499,10 +1500,8 @@ static int ks_hw_init(struct ks_net *ks)
ks->mcast_lst_size = 0;
ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
- if (!ks->frame_head_info) {
- pr_err("Error: Fail to allocate frame memory\n");
+ if (!ks->frame_head_info)
return false;
- }
ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
return true;
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index e52cd310ae76..ef723b185d85 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -1,5 +1,5 @@
/**
- * drivers/net/ksx884x.c - Micrel KSZ8841/2 PCI Ethernet driver
+ * drivers/net/ethernet/micrel/ksx884x.c - Micrel KSZ8841/2 PCI Ethernet driver
*
* Copyright (c) 2009-2010 Micrel, Inc.
* Tristram Ha <Tristram.Ha@micrel.com>
@@ -4863,7 +4863,7 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
memset(&skb->data[skb->len], 0, 50 - skb->len);
skb->len = 50;
} else {
- skb = dev_alloc_skb(50);
+ skb = netdev_alloc_skb(dev, 50);
if (!skb)
return NETDEV_TX_BUSY;
memcpy(skb->data, org_skb->data, org_skb->len);
@@ -4885,7 +4885,7 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
(ETH_P_IPV6 == htons(skb->protocol)))) {
struct sk_buff *org_skb = skb;
- skb = dev_alloc_skb(org_skb->len);
+ skb = netdev_alloc_skb(dev, org_skb->len);
if (!skb) {
rc = NETDEV_TX_BUSY;
goto unlock;
@@ -5019,7 +5019,7 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
do {
/* skb->data != skb->head */
- skb = dev_alloc_skb(packet_len + 2);
+ skb = netdev_alloc_skb(dev, packet_len + 2);
if (!skb) {
dev->stats.rx_dropped++;
return -ENOMEM;
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 50055e0282ed..6118bdad244f 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -527,6 +527,7 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
return enc28j60_set_hw_macaddr(dev);
}
@@ -954,14 +955,13 @@ static void enc28j60_hw_rx(struct net_device *ndev)
if (len > MAX_FRAMELEN)
ndev->stats.rx_over_errors++;
} else {
- skb = dev_alloc_skb(len + NET_IP_ALIGN);
+ skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN);
if (!skb) {
if (netif_msg_rx_err(priv))
dev_err(&ndev->dev,
"out of memory for Rx'd frame\n");
ndev->stats.rx_dropped++;
} else {
- skb->dev = ndev;
skb_reserve(skb, NET_IP_ALIGN);
/* copy the packet from the receive buffer */
enc28j60_mem_read(priv,
@@ -1553,9 +1553,6 @@ static int __devinit enc28j60_probe(struct spi_device *spi)
dev = alloc_etherdev(sizeof(struct enc28j60_net));
if (!dev) {
- if (netif_msg_drv(&debug))
- dev_err(&spi->dev, DRV_NAME
- ": unable to alloc new ethernet\n");
ret = -ENOMEM;
goto error_alloc;
}
@@ -1579,7 +1576,7 @@ static int __devinit enc28j60_probe(struct spi_device *spi)
ret = -EIO;
goto error_irq;
}
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
enc28j60_set_hw_macaddr(dev);
/* Board setup must set the relevant edge trigger type;
diff --git a/drivers/net/ethernet/mipsnet.c b/drivers/net/ethernet/mipsnet.c
index d05b0c9e1e9c..db5285befe2a 100644
--- a/drivers/net/ethernet/mipsnet.c
+++ b/drivers/net/ethernet/mipsnet.c
@@ -152,7 +152,7 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t len)
if (!len)
return len;
- skb = dev_alloc_skb(len + NET_IP_ALIGN);
+ skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
if (!skb) {
dev->stats.rx_dropped++;
return -ENOMEM;
@@ -281,7 +281,7 @@ static int __devinit mipsnet_probe(struct platform_device *dev)
* Lacking any better mechanism to allocate a MAC address we use a
* random one ...
*/
- random_ether_addr(netdev->dev_addr);
+ eth_hw_addr_random(netdev);
err = register_netdev(netdev);
if (err) {
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 20b72ecb020a..27273ae1a6e6 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3910,10 +3910,8 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static int board_number;
netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
- if (netdev == NULL) {
- dev_err(dev, "Could not allocate ethernet device\n");
+ if (netdev == NULL)
return -ENOMEM;
- }
SET_NETDEV_DEV(netdev, &pdev->dev);
diff --git a/drivers/net/ethernet/natsemi/ibmlana.c b/drivers/net/ethernet/natsemi/ibmlana.c
index 999407f7ebdf..3f94ddbf4dc0 100644
--- a/drivers/net/ethernet/natsemi/ibmlana.c
+++ b/drivers/net/ethernet/natsemi/ibmlana.c
@@ -589,7 +589,7 @@ static void irqrx_handler(struct net_device *dev)
/* fetch buffer */
- skb = dev_alloc_skb(rda.length + 2);
+ skb = netdev_alloc_skb(dev, rda.length + 2);
if (skb == NULL)
dev->stats.rx_dropped++;
else {
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index f1b85561c650..e640e23460de 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -307,7 +307,7 @@ static void __devinit mac_onboard_sonic_ethernet_addr(struct net_device *dev)
printk(KERN_WARNING "macsonic: MAC address in CAM entry 15 "
"seems invalid, will use a random MAC\n");
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
}
static int __devinit mac_onboard_sonic_probe(struct net_device *dev)
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index ac7b16b6e7af..d38e48d4f430 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -1934,11 +1934,10 @@ static void refill_rx(struct net_device *dev)
int entry = np->dirty_rx % RX_RING_SIZE;
if (np->rx_skbuff[entry] == NULL) {
unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
- skb = dev_alloc_skb(buflen);
+ skb = netdev_alloc_skb(dev, buflen);
np->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
- skb->dev = dev; /* Mark as being used by this device. */
np->rx_dma[entry] = pci_map_single(np->pci_dev,
skb->data, buflen, PCI_DMA_FROMDEVICE);
np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
@@ -2344,7 +2343,7 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
/* Check if the packet is long enough to accept
* without copying to a minimally-sized skbuff. */
if (pkt_len < rx_copybreak &&
- (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) {
+ (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
/* 16 byte align the IP header */
skb_reserve(skb, RX_OFFSET);
pci_dma_sync_single_for_cpu(np->pci_dev,
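
The natsemi hunk above, like the ks8695, sonic, s2io and forcedeth hunks elsewhere in this section, swaps dev_alloc_skb() for netdev_alloc_skb(); the latter takes the net_device and sets skb->dev itself, which is why the manual skb->dev assignments disappear. The refill pattern after the conversion, reduced to its essentials (buflen assumed to be the ring's buffer size):

	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, buflen);	/* skb->dev already set */
	if (!skb)
		return -ENOMEM;		/* try again on the next refill */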
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 26e25d7f5829..46795e403467 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -51,7 +51,7 @@ static int sonic_open(struct net_device *dev)
printk("sonic_open: initializing sonic driver.\n");
for (i = 0; i < SONIC_NUM_RRS; i++) {
- struct sk_buff *skb = dev_alloc_skb(SONIC_RBSIZE + 2);
+ struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (skb == NULL) {
while(i > 0) { /* free any that were allocated successfully */
i--;
@@ -422,7 +422,7 @@ static void sonic_rx(struct net_device *dev)
status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
if (status & SONIC_RCR_PRX) {
/* Malloc up new buffer. */
- new_skb = dev_alloc_skb(SONIC_RBSIZE + 2);
+ new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (new_skb == NULL) {
printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
lp->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 97f63e12d86e..22a8de00bf02 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2524,7 +2524,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
/* allocate skb */
- skb = dev_alloc_skb(size);
+ skb = netdev_alloc_skb(nic->dev, size);
if (!skb) {
DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
ring->dev->name);
@@ -5248,7 +5248,7 @@ static int s2io_set_mac_addr(struct net_device *dev, void *p)
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
@@ -6820,7 +6820,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
*/
rxdp1->Buffer0_ptr = *temp0;
} else {
- *skb = dev_alloc_skb(size);
+ *skb = netdev_alloc_skb(dev, size);
if (!(*skb)) {
DBG_PRINT(INFO_DBG,
"%s: Out of memory to allocate %s\n",
@@ -6849,7 +6849,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
rxdp3->Buffer0_ptr = *temp0;
rxdp3->Buffer1_ptr = *temp1;
} else {
- *skb = dev_alloc_skb(size);
+ *skb = netdev_alloc_skb(dev, size);
if (!(*skb)) {
DBG_PRINT(INFO_DBG,
"%s: Out of memory to allocate %s\n",
@@ -7760,7 +7760,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
else
dev = alloc_etherdev(sizeof(struct s2io_nic));
if (dev == NULL) {
- DBG_PRINT(ERR_DBG, "Device allocation failed\n");
pci_disable_device(pdev);
pci_release_regions(pdev);
return -ENODEV;
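
Two small cleanups recur in the s2io hunks and elsewhere in this series: an invalid address passed to .ndo_set_mac_address is rejected with -EADDRNOTAVAIL (matching eth_mac_addr()) rather than -EINVAL, and the out-of-memory printk after alloc_etherdev() is dropped, presumably because the allocation failure is already reported by the core. A minimal sketch of a handler following that convention — example_set_mac_address() is an illustrative name:

#include <linux/etherdevice.h>

/* Minimal .ndo_set_mac_address sketch: multicast and all-zero addresses
 * are refused with -EADDRNOTAVAIL.
 */
static int example_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	/* ...program the new address into the MAC's filter registers... */
	return 0;
}
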
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index 8d288af16fc9..9d11ab7521bc 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -1,5 +1,5 @@
/*
- * drivers/net/netx-eth.c
+ * drivers/net/ethernet/netx-eth.c
*
* Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
*
@@ -150,7 +150,7 @@ static void netx_eth_receive(struct net_device *ndev)
seg = (val & FIFO_PTR_SEGMENT_MASK) >> FIFO_PTR_SEGMENT_SHIFT;
len = (val & FIFO_PTR_FRAMELEN_MASK) >> FIFO_PTR_FRAMELEN_SHIFT;
- skb = dev_alloc_skb(len);
+ skb = netdev_alloc_skb(ndev, len);
if (unlikely(skb == NULL)) {
printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
ndev->name);
@@ -383,7 +383,6 @@ static int netx_eth_drv_probe(struct platform_device *pdev)
ndev = alloc_etherdev(sizeof (struct netx_eth_priv));
if (!ndev) {
- printk("%s: could not allocate device.\n", CARDNAME);
ret = -ENOMEM;
goto exit;
}
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index b75a0497d58d..6893a65ae55f 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -735,7 +735,7 @@ static void netdev_rx(struct net_device *dev)
if (status & RXDS_RXGD) {
data = ether->rdesc->recv_buf[ether->cur_rx];
- skb = dev_alloc_skb(length+2);
+ skb = netdev_alloc_skb(dev, length + 2);
if (!skb) {
dev_err(&pdev->dev, "get skb buffer error\n");
ether->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 4c4e7f458383..8561dd25db66 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1815,7 +1815,7 @@ static int nv_alloc_rx(struct net_device *dev)
less_rx = np->last_rx.orig;
while (np->put_rx.orig != less_rx) {
- struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+ struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
if (skb) {
np->put_rx_ctx->skb = skb;
np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
@@ -1850,7 +1850,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
less_rx = np->last_rx.ex;
while (np->put_rx.ex != less_rx) {
- struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+ struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
if (skb) {
np->put_rx_ctx->skb = skb;
np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
@@ -3022,6 +3022,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
/* synchronized against open : rtnl_lock() held by caller */
memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
if (netif_running(dev)) {
netif_tx_lock_bh(dev);
@@ -4993,9 +4994,9 @@ static int nv_loopback_test(struct net_device *dev)
/* setup packet for tx */
pkt_len = ETH_DATA_LEN;
- tx_skb = dev_alloc_skb(pkt_len);
+ tx_skb = netdev_alloc_skb(dev, pkt_len);
if (!tx_skb) {
- netdev_err(dev, "dev_alloc_skb() failed during loopback test\n");
+ netdev_err(dev, "netdev_alloc_skb() failed during loopback test\n");
ret = 0;
goto out;
}
@@ -5741,7 +5742,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev_err(&pci_dev->dev,
"Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
dev->dev_addr);
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
dev_err(&pci_dev->dev,
"Using random MAC address: %pM\n", dev->dev_addr);
}
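
The forcedeth change is the flip side of eth_hw_addr_random(): once a proper address is configured through .ndo_set_mac_address, the NET_ADDR_RANDOM bit is cleared from dev->addr_assign_type so the interface no longer advertises a randomly generated MAC. Illustratively — example_mark_mac_configured() is a placeholder, not driver code:

/* After accepting a user-supplied MAC, drop the "randomly assigned"
 * marker that eth_hw_addr_random() set at probe time.
 */
static void example_mark_mac_configured(struct net_device *dev,
					const struct sockaddr *addr)
{
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
}
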
diff --git a/drivers/net/ethernet/nxp/Kconfig b/drivers/net/ethernet/nxp/Kconfig
new file mode 100644
index 000000000000..0d9baf98a3b9
--- /dev/null
+++ b/drivers/net/ethernet/nxp/Kconfig
@@ -0,0 +1,8 @@
+config LPC_ENET
+ tristate "NXP ethernet MAC on LPC devices"
+ depends on ARCH_LPC32XX
+ select PHYLIB
+ help
+ Say Y or M here if you want to use the NXP ethernet MAC included on
+ some NXP LPC devices. You can safely enable this option for LPC32xx
+ SoC. Also available as a module.
diff --git a/drivers/net/ethernet/nxp/Makefile b/drivers/net/ethernet/nxp/Makefile
new file mode 100644
index 000000000000..a128114e6895
--- /dev/null
+++ b/drivers/net/ethernet/nxp/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_LPC_ENET) += lpc_eth.o
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
new file mode 100644
index 000000000000..69444247c20b
--- /dev/null
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -0,0 +1,1604 @@
+/*
+ * drivers/net/ethernet/nxp/lpc_eth.c
+ *
+ * Author: Kevin Wells <kevin.wells@nxp.com>
+ *
+ * Copyright (C) 2010 NXP Semiconductors
+ * Copyright (C) 2012 Roland Stigge <stigge@antcom.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/clk.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/phy.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_net.h>
+#include <linux/types.h>
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <mach/board.h>
+#include <mach/platform.h>
+#include <mach/hardware.h>
+
+#define MODNAME "lpc-eth"
+#define DRV_VERSION "1.00"
+#define PHYDEF_ADDR 0x00
+
+#define ENET_MAXF_SIZE 1536
+#define ENET_RX_DESC 48
+#define ENET_TX_DESC 16
+
+#define NAPI_WEIGHT 16
+
+/*
+ * Ethernet MAC controller Register offsets
+ */
+#define LPC_ENET_MAC1(x) (x + 0x000)
+#define LPC_ENET_MAC2(x) (x + 0x004)
+#define LPC_ENET_IPGT(x) (x + 0x008)
+#define LPC_ENET_IPGR(x) (x + 0x00C)
+#define LPC_ENET_CLRT(x) (x + 0x010)
+#define LPC_ENET_MAXF(x) (x + 0x014)
+#define LPC_ENET_SUPP(x) (x + 0x018)
+#define LPC_ENET_TEST(x) (x + 0x01C)
+#define LPC_ENET_MCFG(x) (x + 0x020)
+#define LPC_ENET_MCMD(x) (x + 0x024)
+#define LPC_ENET_MADR(x) (x + 0x028)
+#define LPC_ENET_MWTD(x) (x + 0x02C)
+#define LPC_ENET_MRDD(x) (x + 0x030)
+#define LPC_ENET_MIND(x) (x + 0x034)
+#define LPC_ENET_SA0(x) (x + 0x040)
+#define LPC_ENET_SA1(x) (x + 0x044)
+#define LPC_ENET_SA2(x) (x + 0x048)
+#define LPC_ENET_COMMAND(x) (x + 0x100)
+#define LPC_ENET_STATUS(x) (x + 0x104)
+#define LPC_ENET_RXDESCRIPTOR(x) (x + 0x108)
+#define LPC_ENET_RXSTATUS(x) (x + 0x10C)
+#define LPC_ENET_RXDESCRIPTORNUMBER(x) (x + 0x110)
+#define LPC_ENET_RXPRODUCEINDEX(x) (x + 0x114)
+#define LPC_ENET_RXCONSUMEINDEX(x) (x + 0x118)
+#define LPC_ENET_TXDESCRIPTOR(x) (x + 0x11C)
+#define LPC_ENET_TXSTATUS(x) (x + 0x120)
+#define LPC_ENET_TXDESCRIPTORNUMBER(x) (x + 0x124)
+#define LPC_ENET_TXPRODUCEINDEX(x) (x + 0x128)
+#define LPC_ENET_TXCONSUMEINDEX(x) (x + 0x12C)
+#define LPC_ENET_TSV0(x) (x + 0x158)
+#define LPC_ENET_TSV1(x) (x + 0x15C)
+#define LPC_ENET_RSV(x) (x + 0x160)
+#define LPC_ENET_FLOWCONTROLCOUNTER(x) (x + 0x170)
+#define LPC_ENET_FLOWCONTROLSTATUS(x) (x + 0x174)
+#define LPC_ENET_RXFILTER_CTRL(x) (x + 0x200)
+#define LPC_ENET_RXFILTERWOLSTATUS(x) (x + 0x204)
+#define LPC_ENET_RXFILTERWOLCLEAR(x) (x + 0x208)
+#define LPC_ENET_HASHFILTERL(x) (x + 0x210)
+#define LPC_ENET_HASHFILTERH(x) (x + 0x214)
+#define LPC_ENET_INTSTATUS(x) (x + 0xFE0)
+#define LPC_ENET_INTENABLE(x) (x + 0xFE4)
+#define LPC_ENET_INTCLEAR(x) (x + 0xFE8)
+#define LPC_ENET_INTSET(x) (x + 0xFEC)
+#define LPC_ENET_POWERDOWN(x) (x + 0xFF4)
+
+/*
+ * mac1 register definitions
+ */
+#define LPC_MAC1_RECV_ENABLE (1 << 0)
+#define LPC_MAC1_PASS_ALL_RX_FRAMES (1 << 1)
+#define LPC_MAC1_RX_FLOW_CONTROL (1 << 2)
+#define LPC_MAC1_TX_FLOW_CONTROL (1 << 3)
+#define LPC_MAC1_LOOPBACK (1 << 4)
+#define LPC_MAC1_RESET_TX (1 << 8)
+#define LPC_MAC1_RESET_MCS_TX (1 << 9)
+#define LPC_MAC1_RESET_RX (1 << 10)
+#define LPC_MAC1_RESET_MCS_RX (1 << 11)
+#define LPC_MAC1_SIMULATION_RESET (1 << 14)
+#define LPC_MAC1_SOFT_RESET (1 << 15)
+
+/*
+ * mac2 register definitions
+ */
+#define LPC_MAC2_FULL_DUPLEX (1 << 0)
+#define LPC_MAC2_FRAME_LENGTH_CHECKING (1 << 1)
+#define LPC_MAC2_HUGH_LENGTH_CHECKING (1 << 2)
+#define LPC_MAC2_DELAYED_CRC (1 << 3)
+#define LPC_MAC2_CRC_ENABLE (1 << 4)
+#define LPC_MAC2_PAD_CRC_ENABLE (1 << 5)
+#define LPC_MAC2_VLAN_PAD_ENABLE (1 << 6)
+#define LPC_MAC2_AUTO_DETECT_PAD_ENABLE (1 << 7)
+#define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT (1 << 8)
+#define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT (1 << 9)
+#define LPC_MAC2_NO_BACKOFF (1 << 12)
+#define LPC_MAC2_BACK_PRESSURE (1 << 13)
+#define LPC_MAC2_EXCESS_DEFER (1 << 14)
+
+/*
+ * ipgt register definitions
+ */
+#define LPC_IPGT_LOAD(n) ((n) & 0x7F)
+
+/*
+ * ipgr register definitions
+ */
+#define LPC_IPGR_LOAD_PART2(n) ((n) & 0x7F)
+#define LPC_IPGR_LOAD_PART1(n) (((n) & 0x7F) << 8)
+
+/*
+ * clrt register definitions
+ */
+#define LPC_CLRT_LOAD_RETRY_MAX(n) ((n) & 0xF)
+#define LPC_CLRT_LOAD_COLLISION_WINDOW(n) (((n) & 0x3F) << 8)
+
+/*
+ * maxf register definitions
+ */
+#define LPC_MAXF_LOAD_MAX_FRAME_LEN(n) ((n) & 0xFFFF)
+
+/*
+ * supp register definitions
+ */
+#define LPC_SUPP_SPEED (1 << 8)
+#define LPC_SUPP_RESET_RMII (1 << 11)
+
+/*
+ * test register definitions
+ */
+#define LPC_TEST_SHORTCUT_PAUSE_QUANTA (1 << 0)
+#define LPC_TEST_PAUSE (1 << 1)
+#define LPC_TEST_BACKPRESSURE (1 << 2)
+
+/*
+ * mcfg register definitions
+ */
+#define LPC_MCFG_SCAN_INCREMENT (1 << 0)
+#define LPC_MCFG_SUPPRESS_PREAMBLE (1 << 1)
+#define LPC_MCFG_CLOCK_SELECT(n) (((n) & 0x7) << 2)
+#define LPC_MCFG_CLOCK_HOST_DIV_4 0
+#define LPC_MCFG_CLOCK_HOST_DIV_6 2
+#define LPC_MCFG_CLOCK_HOST_DIV_8 3
+#define LPC_MCFG_CLOCK_HOST_DIV_10 4
+#define LPC_MCFG_CLOCK_HOST_DIV_14 5
+#define LPC_MCFG_CLOCK_HOST_DIV_20 6
+#define LPC_MCFG_CLOCK_HOST_DIV_28 7
+#define LPC_MCFG_RESET_MII_MGMT (1 << 15)
+
+/*
+ * mcmd register definitions
+ */
+#define LPC_MCMD_READ (1 << 0)
+#define LPC_MCMD_SCAN (1 << 1)
+
+/*
+ * madr register definitions
+ */
+#define LPC_MADR_REGISTER_ADDRESS(n) ((n) & 0x1F)
+#define LPC_MADR_PHY_0ADDRESS(n) (((n) & 0x1F) << 8)
+
+/*
+ * mwtd register definitions
+ */
+#define LPC_MWDT_WRITE(n) ((n) & 0xFFFF)
+
+/*
+ * mrdd register definitions
+ */
+#define LPC_MRDD_READ_MASK 0xFFFF
+
+/*
+ * mind register definitions
+ */
+#define LPC_MIND_BUSY (1 << 0)
+#define LPC_MIND_SCANNING (1 << 1)
+#define LPC_MIND_NOT_VALID (1 << 2)
+#define LPC_MIND_MII_LINK_FAIL (1 << 3)
+
+/*
+ * command register definitions
+ */
+#define LPC_COMMAND_RXENABLE (1 << 0)
+#define LPC_COMMAND_TXENABLE (1 << 1)
+#define LPC_COMMAND_REG_RESET (1 << 3)
+#define LPC_COMMAND_TXRESET (1 << 4)
+#define LPC_COMMAND_RXRESET (1 << 5)
+#define LPC_COMMAND_PASSRUNTFRAME (1 << 6)
+#define LPC_COMMAND_PASSRXFILTER (1 << 7)
+#define LPC_COMMAND_TXFLOWCONTROL (1 << 8)
+#define LPC_COMMAND_RMII (1 << 9)
+#define LPC_COMMAND_FULLDUPLEX (1 << 10)
+
+/*
+ * status register definitions
+ */
+#define LPC_STATUS_RXACTIVE (1 << 0)
+#define LPC_STATUS_TXACTIVE (1 << 1)
+
+/*
+ * tsv0 register definitions
+ */
+#define LPC_TSV0_CRC_ERROR (1 << 0)
+#define LPC_TSV0_LENGTH_CHECK_ERROR (1 << 1)
+#define LPC_TSV0_LENGTH_OUT_OF_RANGE (1 << 2)
+#define LPC_TSV0_DONE (1 << 3)
+#define LPC_TSV0_MULTICAST (1 << 4)
+#define LPC_TSV0_BROADCAST (1 << 5)
+#define LPC_TSV0_PACKET_DEFER (1 << 6)
+#define LPC_TSV0_ESCESSIVE_DEFER (1 << 7)
+#define LPC_TSV0_ESCESSIVE_COLLISION (1 << 8)
+#define LPC_TSV0_LATE_COLLISION (1 << 9)
+#define LPC_TSV0_GIANT (1 << 10)
+#define LPC_TSV0_UNDERRUN (1 << 11)
+#define LPC_TSV0_TOTAL_BYTES(n) (((n) >> 12) & 0xFFFF)
+#define LPC_TSV0_CONTROL_FRAME (1 << 28)
+#define LPC_TSV0_PAUSE (1 << 29)
+#define LPC_TSV0_BACKPRESSURE (1 << 30)
+#define LPC_TSV0_VLAN (1 << 31)
+
+/*
+ * tsv1 register definitions
+ */
+#define LPC_TSV1_TRANSMIT_BYTE_COUNT(n) ((n) & 0xFFFF)
+#define LPC_TSV1_COLLISION_COUNT(n) (((n) >> 16) & 0xF)
+
+/*
+ * rsv register definitions
+ */
+#define LPC_RSV_RECEIVED_BYTE_COUNT(n) ((n) & 0xFFFF)
+#define LPC_RSV_RXDV_EVENT_IGNORED (1 << 16)
+#define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN (1 << 17)
+#define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN (1 << 18)
+#define LPC_RSV_RECEIVE_CODE_VIOLATION (1 << 19)
+#define LPC_RSV_CRC_ERROR (1 << 20)
+#define LPC_RSV_LENGTH_CHECK_ERROR (1 << 21)
+#define LPC_RSV_LENGTH_OUT_OF_RANGE (1 << 22)
+#define LPC_RSV_RECEIVE_OK (1 << 23)
+#define LPC_RSV_MULTICAST (1 << 24)
+#define LPC_RSV_BROADCAST (1 << 25)
+#define LPC_RSV_DRIBBLE_NIBBLE (1 << 26)
+#define LPC_RSV_CONTROL_FRAME (1 << 27)
+#define LPC_RSV_PAUSE (1 << 28)
+#define LPC_RSV_UNSUPPORTED_OPCODE (1 << 29)
+#define LPC_RSV_VLAN (1 << 30)
+
+/*
+ * flowcontrolcounter register definitions
+ */
+#define LPC_FCCR_MIRRORCOUNTER(n) ((n) & 0xFFFF)
+#define LPC_FCCR_PAUSETIMER(n) (((n) >> 16) & 0xFFFF)
+
+/*
+ * flowcontrolstatus register definitions
+ */
+#define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF)
+
+/*
+ * rxfliterctrl, rxfilterwolstatus, and rxfilterwolclear shared
+ * register definitions
+ */
+#define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0)
+#define LPC_RXFLTRW_ACCEPTUBROADCAST (1 << 1)
+#define LPC_RXFLTRW_ACCEPTUMULTICAST (1 << 2)
+#define LPC_RXFLTRW_ACCEPTUNICASTHASH (1 << 3)
+#define LPC_RXFLTRW_ACCEPTUMULTICASTHASH (1 << 4)
+#define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5)
+
+/*
+ * rxfliterctrl register definitions
+ */
+#define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12)
+#define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13)
+
+/*
+ * rxfilterwolstatus/rxfilterwolclear register definitions
+ */
+#define LPC_RXFLTRWSTS_RXFILTERWOL (1 << 7)
+#define LPC_RXFLTRWSTS_MAGICPACKETWOL (1 << 8)
+
+/*
+ * intstatus, intenable, intclear, and Intset shared register
+ * definitions
+ */
+#define LPC_MACINT_RXOVERRUNINTEN (1 << 0)
+#define LPC_MACINT_RXERRORONINT (1 << 1)
+#define LPC_MACINT_RXFINISHEDINTEN (1 << 2)
+#define LPC_MACINT_RXDONEINTEN (1 << 3)
+#define LPC_MACINT_TXUNDERRUNINTEN (1 << 4)
+#define LPC_MACINT_TXERRORINTEN (1 << 5)
+#define LPC_MACINT_TXFINISHEDINTEN (1 << 6)
+#define LPC_MACINT_TXDONEINTEN (1 << 7)
+#define LPC_MACINT_SOFTINTEN (1 << 12)
+#define LPC_MACINT_WAKEUPINTEN (1 << 13)
+
+/*
+ * powerdown register definitions
+ */
+#define LPC_POWERDOWN_MACAHB (1 << 31)
+
+/* Upon the upcoming introduction of device tree usage in LPC32xx,
+ * lpc_phy_interface_mode() and use_iram_for_net() will be extended with a
+ * device parameter for access to device tree information at runtime, instead
+ * of defining the values at compile time
+ */
+static inline phy_interface_t lpc_phy_interface_mode(void)
+{
+#ifdef CONFIG_ARCH_LPC32XX_MII_SUPPORT
+ return PHY_INTERFACE_MODE_MII;
+#else
+ return PHY_INTERFACE_MODE_RMII;
+#endif
+}
+
+static inline int use_iram_for_net(void)
+{
+#ifdef CONFIG_ARCH_LPC32XX_IRAM_FOR_NET
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+/* Receive Status information word */
+#define RXSTATUS_SIZE 0x000007FF
+#define RXSTATUS_CONTROL (1 << 18)
+#define RXSTATUS_VLAN (1 << 19)
+#define RXSTATUS_FILTER (1 << 20)
+#define RXSTATUS_MULTICAST (1 << 21)
+#define RXSTATUS_BROADCAST (1 << 22)
+#define RXSTATUS_CRC (1 << 23)
+#define RXSTATUS_SYMBOL (1 << 24)
+#define RXSTATUS_LENGTH (1 << 25)
+#define RXSTATUS_RANGE (1 << 26)
+#define RXSTATUS_ALIGN (1 << 27)
+#define RXSTATUS_OVERRUN (1 << 28)
+#define RXSTATUS_NODESC (1 << 29)
+#define RXSTATUS_LAST (1 << 30)
+#define RXSTATUS_ERROR (1 << 31)
+
+#define RXSTATUS_STATUS_ERROR \
+ (RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \
+ RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC)
+
+/* Receive Descriptor control word */
+#define RXDESC_CONTROL_SIZE 0x000007FF
+#define RXDESC_CONTROL_INT (1 << 31)
+
+/* Transmit Status information word */
+#define TXSTATUS_COLLISIONS_GET(x) (((x) >> 21) & 0xF)
+#define TXSTATUS_DEFER (1 << 25)
+#define TXSTATUS_EXCESSDEFER (1 << 26)
+#define TXSTATUS_EXCESSCOLL (1 << 27)
+#define TXSTATUS_LATECOLL (1 << 28)
+#define TXSTATUS_UNDERRUN (1 << 29)
+#define TXSTATUS_NODESC (1 << 30)
+#define TXSTATUS_ERROR (1 << 31)
+
+/* Transmit Descriptor control word */
+#define TXDESC_CONTROL_SIZE 0x000007FF
+#define TXDESC_CONTROL_OVERRIDE (1 << 26)
+#define TXDESC_CONTROL_HUGE (1 << 27)
+#define TXDESC_CONTROL_PAD (1 << 28)
+#define TXDESC_CONTROL_CRC (1 << 29)
+#define TXDESC_CONTROL_LAST (1 << 30)
+#define TXDESC_CONTROL_INT (1 << 31)
+
+static int lpc_eth_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev);
+
+/*
+ * Structure of a TX/RX descriptors and RX status
+ */
+struct txrx_desc_t {
+ __le32 packet;
+ __le32 control;
+};
+struct rx_status_t {
+ __le32 statusinfo;
+ __le32 statushashcrc;
+};
+
+/*
+ * Device driver data structure
+ */
+struct netdata_local {
+ struct platform_device *pdev;
+ struct net_device *ndev;
+ spinlock_t lock;
+ void __iomem *net_base;
+ u32 msg_enable;
+ struct sk_buff *skb[ENET_TX_DESC];
+ unsigned int last_tx_idx;
+ unsigned int num_used_tx_buffs;
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ struct clk *clk;
+ dma_addr_t dma_buff_base_p;
+ void *dma_buff_base_v;
+ size_t dma_buff_size;
+ struct txrx_desc_t *tx_desc_v;
+ u32 *tx_stat_v;
+ void *tx_buff_v;
+ struct txrx_desc_t *rx_desc_v;
+ struct rx_status_t *rx_stat_v;
+ void *rx_buff_v;
+ int link;
+ int speed;
+ int duplex;
+ struct napi_struct napi;
+};
+
+/*
+ * MAC support functions
+ */
+static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
+{
+ u32 tmp;
+
+ /* Set station address */
+ tmp = mac[0] | ((u32)mac[1] << 8);
+ writel(tmp, LPC_ENET_SA2(pldat->net_base));
+ tmp = mac[2] | ((u32)mac[3] << 8);
+ writel(tmp, LPC_ENET_SA1(pldat->net_base));
+ tmp = mac[4] | ((u32)mac[5] << 8);
+ writel(tmp, LPC_ENET_SA0(pldat->net_base));
+
+ netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac);
+}
+
+static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
+{
+ u32 tmp;
+
+ /* Get station address */
+ tmp = readl(LPC_ENET_SA2(pldat->net_base));
+ mac[0] = tmp & 0xFF;
+ mac[1] = tmp >> 8;
+ tmp = readl(LPC_ENET_SA1(pldat->net_base));
+ mac[2] = tmp & 0xFF;
+ mac[3] = tmp >> 8;
+ tmp = readl(LPC_ENET_SA0(pldat->net_base));
+ mac[4] = tmp & 0xFF;
+ mac[5] = tmp >> 8;
+}
+
+static void __lpc_eth_clock_enable(struct netdata_local *pldat,
+ bool enable)
+{
+ if (enable)
+ clk_enable(pldat->clk);
+ else
+ clk_disable(pldat->clk);
+}
+
+static void __lpc_params_setup(struct netdata_local *pldat)
+{
+ u32 tmp;
+
+ if (pldat->duplex == DUPLEX_FULL) {
+ tmp = readl(LPC_ENET_MAC2(pldat->net_base));
+ tmp |= LPC_MAC2_FULL_DUPLEX;
+ writel(tmp, LPC_ENET_MAC2(pldat->net_base));
+ tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
+ tmp |= LPC_COMMAND_FULLDUPLEX;
+ writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
+ writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base));
+ } else {
+ tmp = readl(LPC_ENET_MAC2(pldat->net_base));
+ tmp &= ~LPC_MAC2_FULL_DUPLEX;
+ writel(tmp, LPC_ENET_MAC2(pldat->net_base));
+ tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
+ tmp &= ~LPC_COMMAND_FULLDUPLEX;
+ writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
+ writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base));
+ }
+
+ if (pldat->speed == SPEED_100)
+ writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base));
+ else
+ writel(0, LPC_ENET_SUPP(pldat->net_base));
+}
+
+static void __lpc_eth_reset(struct netdata_local *pldat)
+{
+ /* Reset all MAC logic */
+ writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX |
+ LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET |
+ LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base));
+ writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET |
+ LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base));
+}
+
+static int __lpc_mii_mngt_reset(struct netdata_local *pldat)
+{
+ /* Reset MII management hardware */
+ writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base));
+
+ /* Setup MII clock to slowest rate with a /28 divider */
+ writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28),
+ LPC_ENET_MCFG(pldat->net_base));
+
+ return 0;
+}
+
+static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat)
+{
+ phys_addr_t phaddr;
+
+ phaddr = addr - pldat->dma_buff_base_v;
+ phaddr += pldat->dma_buff_base_p;
+
+ return phaddr;
+}
+
+static void lpc_eth_enable_int(void __iomem *regbase)
+{
+ writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN),
+ LPC_ENET_INTENABLE(regbase));
+}
+
+static void lpc_eth_disable_int(void __iomem *regbase)
+{
+ writel(0, LPC_ENET_INTENABLE(regbase));
+}
+
+/* Setup TX/RX descriptors */
+static void __lpc_txrx_desc_setup(struct netdata_local *pldat)
+{
+ u32 *ptxstat;
+ void *tbuff;
+ int i;
+ struct txrx_desc_t *ptxrxdesc;
+ struct rx_status_t *prxstat;
+
+ tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16);
+
+ /* Setup TX descriptors, status, and buffers */
+ pldat->tx_desc_v = tbuff;
+ tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC;
+
+ pldat->tx_stat_v = tbuff;
+ tbuff += sizeof(u32) * ENET_TX_DESC;
+
+ tbuff = PTR_ALIGN(tbuff, 16);
+ pldat->tx_buff_v = tbuff;
+ tbuff += ENET_MAXF_SIZE * ENET_TX_DESC;
+
+ /* Setup RX descriptors, status, and buffers */
+ pldat->rx_desc_v = tbuff;
+ tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC;
+
+ tbuff = PTR_ALIGN(tbuff, 16);
+ pldat->rx_stat_v = tbuff;
+ tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC;
+
+ tbuff = PTR_ALIGN(tbuff, 16);
+ pldat->rx_buff_v = tbuff;
+ tbuff += ENET_MAXF_SIZE * ENET_RX_DESC;
+
+ /* Map the TX descriptors to the TX buffers in hardware */
+ for (i = 0; i < ENET_TX_DESC; i++) {
+ ptxstat = &pldat->tx_stat_v[i];
+ ptxrxdesc = &pldat->tx_desc_v[i];
+
+ ptxrxdesc->packet = __va_to_pa(
+ pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat);
+ ptxrxdesc->control = 0;
+ *ptxstat = 0;
+ }
+
+ /* Map the RX descriptors to the RX buffers in hardware */
+ for (i = 0; i < ENET_RX_DESC; i++) {
+ prxstat = &pldat->rx_stat_v[i];
+ ptxrxdesc = &pldat->rx_desc_v[i];
+
+ ptxrxdesc->packet = __va_to_pa(
+ pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat);
+ ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1);
+ prxstat->statusinfo = 0;
+ prxstat->statushashcrc = 0;
+ }
+
+ /* Setup base addresses in hardware to point to buffers and
+ * descriptors
+ */
+ writel((ENET_TX_DESC - 1),
+ LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base));
+ writel(__va_to_pa(pldat->tx_desc_v, pldat),
+ LPC_ENET_TXDESCRIPTOR(pldat->net_base));
+ writel(__va_to_pa(pldat->tx_stat_v, pldat),
+ LPC_ENET_TXSTATUS(pldat->net_base));
+ writel((ENET_RX_DESC - 1),
+ LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base));
+ writel(__va_to_pa(pldat->rx_desc_v, pldat),
+ LPC_ENET_RXDESCRIPTOR(pldat->net_base));
+ writel(__va_to_pa(pldat->rx_stat_v, pldat),
+ LPC_ENET_RXSTATUS(pldat->net_base));
+}
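
As a side note to the function above: __lpc_txrx_desc_setup() carves TX/RX descriptors, status words and frame buffers out of a single coherent DMA region, which is why __va_to_pa() can translate any pointer inside it for the hardware with plain offset arithmetic. Reduced to its essence — example_va_to_dma() and the base_v/base_p names are illustrative:

#include <linux/dma-mapping.h>

/* Sketch: with one coherent allocation described by (base_v, base_p),
 * the bus address of any object placed inside it is base_p plus the
 * object's offset from base_v.
 */
static dma_addr_t example_va_to_dma(void *obj, void *base_v, dma_addr_t base_p)
{
	return base_p + (obj - base_v);
}
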
+
+static void __lpc_eth_init(struct netdata_local *pldat)
+{
+ u32 tmp;
+
+ /* Disable controller and reset */
+ tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
+ tmp &= ~LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
+ writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
+ tmp = readl(LPC_ENET_MAC1(pldat->net_base));
+ tmp &= ~LPC_MAC1_RECV_ENABLE;
+ writel(tmp, LPC_ENET_MAC1(pldat->net_base));
+
+ /* Initial MAC setup */
+ writel(LPC_MAC1_PASS_ALL_RX_FRAMES, LPC_ENET_MAC1(pldat->net_base));
+ writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE),
+ LPC_ENET_MAC2(pldat->net_base));
+ writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base));
+
+ /* Collision window, gap */
+ writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) |
+ LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)),
+ LPC_ENET_CLRT(pldat->net_base));
+ writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base));
+
+ if (lpc_phy_interface_mode() == PHY_INTERFACE_MODE_MII)
+ writel(LPC_COMMAND_PASSRUNTFRAME,
+ LPC_ENET_COMMAND(pldat->net_base));
+ else {
+ writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
+ LPC_ENET_COMMAND(pldat->net_base));
+ writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
+ }
+
+ __lpc_params_setup(pldat);
+
+ /* Setup TX and RX descriptors */
+ __lpc_txrx_desc_setup(pldat);
+
+ /* Setup packet filtering */
+ writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT),
+ LPC_ENET_RXFILTER_CTRL(pldat->net_base));
+
+ /* Get the next TX buffer output index */
+ pldat->num_used_tx_buffs = 0;
+ pldat->last_tx_idx =
+ readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
+
+ /* Clear and enable interrupts */
+ writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base));
+ smp_wmb();
+ lpc_eth_enable_int(pldat->net_base);
+
+ /* Enable controller */
+ tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
+ tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
+ writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
+ tmp = readl(LPC_ENET_MAC1(pldat->net_base));
+ tmp |= LPC_MAC1_RECV_ENABLE;
+ writel(tmp, LPC_ENET_MAC1(pldat->net_base));
+}
+
+static void __lpc_eth_shutdown(struct netdata_local *pldat)
+{
+ /* Reset ethernet and power down PHY */
+ __lpc_eth_reset(pldat);
+ writel(0, LPC_ENET_MAC1(pldat->net_base));
+ writel(0, LPC_ENET_MAC2(pldat->net_base));
+}
+
+/*
+ * MAC<--->PHY support functions
+ */
+static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg)
+{
+ struct netdata_local *pldat = bus->priv;
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ int lps;
+
+ writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
+ writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base));
+
+ /* Wait for unbusy status */
+ while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
+ if (time_after(jiffies, timeout))
+ return -EIO;
+ cpu_relax();
+ }
+
+ lps = readl(LPC_ENET_MRDD(pldat->net_base));
+ writel(0, LPC_ENET_MCMD(pldat->net_base));
+
+ return lps;
+}
+
+static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg,
+ u16 phydata)
+{
+ struct netdata_local *pldat = bus->priv;
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+ writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
+ writel(phydata, LPC_ENET_MWTD(pldat->net_base));
+
+ /* Wait for completion */
+ while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
+ if (time_after(jiffies, timeout))
+ return -EIO;
+ cpu_relax();
+ }
+
+ return 0;
+}
+
+static int lpc_mdio_reset(struct mii_bus *bus)
+{
+ return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv);
+}
+
+static void lpc_handle_link_change(struct net_device *ndev)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ struct phy_device *phydev = pldat->phy_dev;
+ unsigned long flags;
+
+ bool status_change = false;
+
+ spin_lock_irqsave(&pldat->lock, flags);
+
+ if (phydev->link) {
+ if ((pldat->speed != phydev->speed) ||
+ (pldat->duplex != phydev->duplex)) {
+ pldat->speed = phydev->speed;
+ pldat->duplex = phydev->duplex;
+ status_change = true;
+ }
+ }
+
+ if (phydev->link != pldat->link) {
+ if (!phydev->link) {
+ pldat->speed = 0;
+ pldat->duplex = -1;
+ }
+ pldat->link = phydev->link;
+
+ status_change = true;
+ }
+
+ spin_unlock_irqrestore(&pldat->lock, flags);
+
+ if (status_change)
+ __lpc_params_setup(pldat);
+}
+
+static int lpc_mii_probe(struct net_device *ndev)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ struct phy_device *phydev = phy_find_first(pldat->mii_bus);
+
+ if (!phydev) {
+ netdev_err(ndev, "no PHY found\n");
+ return -ENODEV;
+ }
+
+ /* Attach to the PHY */
+ if (lpc_phy_interface_mode() == PHY_INTERFACE_MODE_MII)
+ netdev_info(ndev, "using MII interface\n");
+ else
+ netdev_info(ndev, "using RMII interface\n");
+ phydev = phy_connect(ndev, dev_name(&phydev->dev),
+ &lpc_handle_link_change, 0, lpc_phy_interface_mode());
+
+ if (IS_ERR(phydev)) {
+ netdev_err(ndev, "Could not attach to PHY\n");
+ return PTR_ERR(phydev);
+ }
+
+ /* mask with MAC supported features */
+ phydev->supported &= PHY_BASIC_FEATURES;
+
+ phydev->advertising = phydev->supported;
+
+ pldat->link = 0;
+ pldat->speed = 0;
+ pldat->duplex = -1;
+ pldat->phy_dev = phydev;
+
+ netdev_info(ndev,
+ "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+ return 0;
+}
+
+static int lpc_mii_init(struct netdata_local *pldat)
+{
+ int err = -ENXIO, i;
+
+ pldat->mii_bus = mdiobus_alloc();
+ if (!pldat->mii_bus) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ /* Setup MII mode */
+ if (lpc_phy_interface_mode() == PHY_INTERFACE_MODE_MII)
+ writel(LPC_COMMAND_PASSRUNTFRAME,
+ LPC_ENET_COMMAND(pldat->net_base));
+ else {
+ writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
+ LPC_ENET_COMMAND(pldat->net_base));
+ writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
+ }
+
+ pldat->mii_bus->name = "lpc_mii_bus";
+ pldat->mii_bus->read = &lpc_mdio_read;
+ pldat->mii_bus->write = &lpc_mdio_write;
+ pldat->mii_bus->reset = &lpc_mdio_reset;
+ snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pldat->pdev->name, pldat->pdev->id);
+ pldat->mii_bus->priv = pldat;
+ pldat->mii_bus->parent = &pldat->pdev->dev;
+
+ pldat->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!pldat->mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_out_1;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ pldat->mii_bus->irq[i] = PHY_POLL;
+
+ platform_set_drvdata(pldat->pdev, pldat->mii_bus);
+
+ if (mdiobus_register(pldat->mii_bus))
+ goto err_out_free_mdio_irq;
+
+ if (lpc_mii_probe(pldat->ndev) != 0)
+ goto err_out_unregister_bus;
+
+ return 0;
+
+err_out_unregister_bus:
+ mdiobus_unregister(pldat->mii_bus);
+err_out_free_mdio_irq:
+ kfree(pldat->mii_bus->irq);
+err_out_1:
+ mdiobus_free(pldat->mii_bus);
+err_out:
+ return err;
+}
+
+static void __lpc_handle_xmit(struct net_device *ndev)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ struct sk_buff *skb;
+ u32 txcidx, *ptxstat, txstat;
+
+ txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
+ while (pldat->last_tx_idx != txcidx) {
+ skb = pldat->skb[pldat->last_tx_idx];
+
+ /* A buffer is available, get buffer status */
+ ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
+ txstat = *ptxstat;
+
+ /* Next buffer and decrement used buffer counter */
+ pldat->num_used_tx_buffs--;
+ pldat->last_tx_idx++;
+ if (pldat->last_tx_idx >= ENET_TX_DESC)
+ pldat->last_tx_idx = 0;
+
+ /* Update collision counter */
+ ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat);
+
+ /* Any errors occurred? */
+ if (txstat & TXSTATUS_ERROR) {
+ if (txstat & TXSTATUS_UNDERRUN) {
+ /* FIFO underrun */
+ ndev->stats.tx_fifo_errors++;
+ }
+ if (txstat & TXSTATUS_LATECOLL) {
+ /* Late collision */
+ ndev->stats.tx_aborted_errors++;
+ }
+ if (txstat & TXSTATUS_EXCESSCOLL) {
+ /* Excessive collision */
+ ndev->stats.tx_aborted_errors++;
+ }
+ if (txstat & TXSTATUS_EXCESSDEFER) {
+ /* Defer limit */
+ ndev->stats.tx_aborted_errors++;
+ }
+ ndev->stats.tx_errors++;
+ } else {
+ /* Update stats */
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+
+ /* Free buffer */
+ dev_kfree_skb_irq(skb);
+ }
+
+ txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
+ }
+
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+}
+
+static int __lpc_handle_recv(struct net_device *ndev, int budget)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ struct sk_buff *skb;
+ u32 rxconsidx, len, ethst;
+ struct rx_status_t *prxstat;
+ u8 *prdbuf;
+ int rx_done = 0;
+
+ /* Get the current RX buffer indexes */
+ rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
+ while (rx_done < budget && rxconsidx !=
+ readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
+ /* Get pointer to receive status */
+ prxstat = &pldat->rx_stat_v[rxconsidx];
+ len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;
+
+ /* Status error? */
+ ethst = prxstat->statusinfo;
+ if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
+ (RXSTATUS_ERROR | RXSTATUS_RANGE))
+ ethst &= ~RXSTATUS_ERROR;
+
+ if (ethst & RXSTATUS_ERROR) {
+ int si = prxstat->statusinfo;
+ /* Check statuses */
+ if (si & RXSTATUS_OVERRUN) {
+ /* Overrun error */
+ ndev->stats.rx_fifo_errors++;
+ } else if (si & RXSTATUS_CRC) {
+ /* CRC error */
+ ndev->stats.rx_crc_errors++;
+ } else if (si & RXSTATUS_LENGTH) {
+ /* Length error */
+ ndev->stats.rx_length_errors++;
+ } else if (si & RXSTATUS_ERROR) {
+ /* Other error */
+ ndev->stats.rx_length_errors++;
+ }
+ ndev->stats.rx_errors++;
+ } else {
+ /* Packet is good */
+ skb = dev_alloc_skb(len + 8);
+ if (!skb)
+ ndev->stats.rx_dropped++;
+ else {
+ prdbuf = skb_put(skb, len);
+
+ /* Copy packet from buffer */
+ memcpy(prdbuf, pldat->rx_buff_v +
+ rxconsidx * ENET_MAXF_SIZE, len);
+
+ /* Pass to upper layer */
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ }
+ }
+
+ /* Increment consume index */
+ rxconsidx = rxconsidx + 1;
+ if (rxconsidx >= ENET_RX_DESC)
+ rxconsidx = 0;
+ writel(rxconsidx,
+ LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
+ rx_done++;
+ }
+
+ return rx_done;
+}
+
+static int lpc_eth_poll(struct napi_struct *napi, int budget)
+{
+ struct netdata_local *pldat = container_of(napi,
+ struct netdata_local, napi);
+ struct net_device *ndev = pldat->ndev;
+ int rx_done = 0;
+ struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);
+
+ __netif_tx_lock(txq, smp_processor_id());
+ __lpc_handle_xmit(ndev);
+ __netif_tx_unlock(txq);
+ rx_done = __lpc_handle_recv(ndev, budget);
+
+ if (rx_done < budget) {
+ napi_complete(napi);
+ lpc_eth_enable_int(pldat->net_base);
+ }
+
+ return rx_done;
+}
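
As a side note, the interrupt handler and lpc_eth_poll() above follow the standard NAPI contract: the ISR masks the device's interrupts and schedules the poller; the poller handles at most budget packets and only calls napi_complete() and re-enables interrupts when it ran out of work before running out of budget. A skeleton of that contract — hw_rx(), hw_irq_mask() and hw_irq_unmask() are placeholders for the device-specific pieces, not real kernel or driver symbols:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

int hw_rx(struct napi_struct *napi, int budget);	/* consumes <= budget frames */
void hw_irq_mask(struct napi_struct *napi);
void hw_irq_unmask(struct napi_struct *napi);

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = hw_rx(napi, budget);

	if (done < budget) {		/* no more work: stop polling */
		napi_complete(napi);
		hw_irq_unmask(napi);	/* let the next frame raise an IRQ */
	}
	return done;			/* == budget keeps NAPI scheduled */
}

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct napi_struct *napi = dev_id;

	hw_irq_mask(napi);		/* quiet the device ... */
	if (likely(napi_schedule_prep(napi)))
		__napi_schedule(napi);	/* ... and defer the work to softirq */
	return IRQ_HANDLED;
}
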
+
+static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct netdata_local *pldat = netdev_priv(ndev);
+ u32 tmp;
+
+ spin_lock(&pldat->lock);
+
+ tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
+ /* Clear interrupts */
+ writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));
+
+ lpc_eth_disable_int(pldat->net_base);
+ if (likely(napi_schedule_prep(&pldat->napi)))
+ __napi_schedule(&pldat->napi);
+
+ spin_unlock(&pldat->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int lpc_eth_close(struct net_device *ndev)
+{
+ unsigned long flags;
+ struct netdata_local *pldat = netdev_priv(ndev);
+
+ if (netif_msg_ifdown(pldat))
+ dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name);
+
+ napi_disable(&pldat->napi);
+ netif_stop_queue(ndev);
+
+ if (pldat->phy_dev)
+ phy_stop(pldat->phy_dev);
+
+ spin_lock_irqsave(&pldat->lock, flags);
+ __lpc_eth_reset(pldat);
+ netif_carrier_off(ndev);
+ writel(0, LPC_ENET_MAC1(pldat->net_base));
+ writel(0, LPC_ENET_MAC2(pldat->net_base));
+ spin_unlock_irqrestore(&pldat->lock, flags);
+
+ __lpc_eth_clock_enable(pldat, false);
+
+ return 0;
+}
+
+static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ u32 len, txidx;
+ u32 *ptxstat;
+ struct txrx_desc_t *ptxrxdesc;
+
+ len = skb->len;
+
+ spin_lock_irq(&pldat->lock);
+
+ if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
+ /* This function should never be called when there are no
+ buffers */
+ netif_stop_queue(ndev);
+ spin_unlock_irq(&pldat->lock);
+ WARN(1, "BUG! TX request when no free TX buffers!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Get the next TX descriptor index */
+ txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base));
+
+ /* Setup control for the transfer */
+ ptxstat = &pldat->tx_stat_v[txidx];
+ *ptxstat = 0;
+ ptxrxdesc = &pldat->tx_desc_v[txidx];
+ ptxrxdesc->control =
+ (len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT;
+
+ /* Copy data to the DMA buffer */
+ memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);
+
+ /* Save the buffer and increment the buffer counter */
+ pldat->skb[txidx] = skb;
+ pldat->num_used_tx_buffs++;
+
+ /* Start transmit */
+ txidx++;
+ if (txidx >= ENET_TX_DESC)
+ txidx = 0;
+ writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base));
+
+ /* Stop queue if no more TX buffers */
+ if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1))
+ netif_stop_queue(ndev);
+
+ spin_unlock_irq(&pldat->lock);
+
+ return NETDEV_TX_OK;
+}
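
As a side note, the transmit path above pairs with __lpc_handle_xmit() in the usual producer/consumer fashion: .ndo_start_xmit stops the queue once the descriptor ring is nearly full, and the TX-completion handler wakes it again after reclaiming buffers. The essential stop/wake handshake, stripped of the hardware details — RING_SIZE, used and the example_* names are placeholders:

#include <linux/netdevice.h>

#define RING_SIZE 16

static unsigned int used;	/* descriptors currently owned by the NIC */

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ...copy or map skb into the descriptor ring, kick the MAC... */
	if (++used >= RING_SIZE - 1)
		netif_stop_queue(dev);	/* ring full: stop feeding us */
	return NETDEV_TX_OK;
}

static void example_tx_complete(struct net_device *dev, unsigned int reclaimed)
{
	used -= reclaimed;		/* skbs already freed by the caller */
	if (used < RING_SIZE - 1 && netif_queue_stopped(dev))
		netif_wake_queue(dev);	/* room again: restart the queue */
}
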
+
+static int lpc_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct netdata_local *pldat = netdev_priv(ndev);
+ unsigned long flags;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
+
+ spin_lock_irqsave(&pldat->lock, flags);
+
+ /* Set station address */
+ __lpc_set_mac(pldat, ndev->dev_addr);
+
+ spin_unlock_irqrestore(&pldat->lock, flags);
+
+ return 0;
+}
+
+static void lpc_eth_set_multicast_list(struct net_device *ndev)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ struct netdev_hw_addr_list *mcptr = &ndev->mc;
+ struct netdev_hw_addr *ha;
+ u32 tmp32, hash_val, hashlo, hashhi;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pldat->lock, flags);
+
+ /* Set station address */
+ __lpc_set_mac(pldat, ndev->dev_addr);
+
+ tmp32 = LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT;
+
+ if (ndev->flags & IFF_PROMISC)
+ tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST |
+ LPC_RXFLTRW_ACCEPTUMULTICAST;
+ if (ndev->flags & IFF_ALLMULTI)
+ tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST;
+
+ if (netdev_hw_addr_list_count(mcptr))
+ tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH;
+
+ writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base));
+
+
+ /* Set initial hash table */
+ hashlo = 0x0;
+ hashhi = 0x0;
+
+ /* 64 bits : multicast address in hash table */
+ netdev_hw_addr_list_for_each(ha, mcptr) {
+ hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F;
+
+ if (hash_val >= 32)
+ hashhi |= 1 << (hash_val - 32);
+ else
+ hashlo |= 1 << hash_val;
+ }
+
+ writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base));
+ writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base));
+
+ spin_unlock_irqrestore(&pldat->lock, flags);
+}
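
As a side note, the multicast filter above is the classic 64-bin hash: the controller indexes a 64-bit table with six bits taken from the CRC-32 of the destination address, split across the HASHFILTERL/H registers. Just the index computation in isolation — example_hash_addr() is an illustrative name:

#include <linux/crc32.h>	/* ether_crc() */

/* Derive the 6-bit hash bin the way the code above does, from bits
 * 28..23 of the Ethernet CRC of the 6-byte address.
 */
static void example_hash_addr(const u8 *addr, u32 *hashlo, u32 *hashhi)
{
	u32 bin = (ether_crc(6, addr) >> 23) & 0x3F;

	if (bin >= 32)
		*hashhi |= 1 << (bin - 32);
	else
		*hashlo |= 1 << bin;
}
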
+
+static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ struct phy_device *phydev = pldat->phy_dev;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(phydev, req, cmd);
+}
+
+static int lpc_eth_open(struct net_device *ndev)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+
+ if (netif_msg_ifup(pldat))
+ dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
+
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ __lpc_eth_clock_enable(pldat, true);
+
+ /* Reset and initialize */
+ __lpc_eth_reset(pldat);
+ __lpc_eth_init(pldat);
+
+ /* schedule a link state check */
+ phy_start(pldat->phy_dev);
+ netif_start_queue(ndev);
+ napi_enable(&pldat->napi);
+
+ return 0;
+}
+
+/*
+ * Ethtool ops
+ */
+static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, MODNAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, dev_name(ndev->dev.parent));
+}
+
+static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+
+ return pldat->msg_enable;
+}
+
+static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+
+ pldat->msg_enable = level;
+}
+
+static int lpc_eth_ethtool_getsettings(struct net_device *ndev,
+ struct ethtool_cmd *cmd)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ struct phy_device *phydev = pldat->phy_dev;
+
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+static int lpc_eth_ethtool_setsettings(struct net_device *ndev,
+ struct ethtool_cmd *cmd)
+{
+ struct netdata_local *pldat = netdev_priv(ndev);
+ struct phy_device *phydev = pldat->phy_dev;
+
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+static const struct ethtool_ops lpc_eth_ethtool_ops = {
+ .get_drvinfo = lpc_eth_ethtool_getdrvinfo,
+ .get_settings = lpc_eth_ethtool_getsettings,
+ .set_settings = lpc_eth_ethtool_setsettings,
+ .get_msglevel = lpc_eth_ethtool_getmsglevel,
+ .set_msglevel = lpc_eth_ethtool_setmsglevel,
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops lpc_netdev_ops = {
+ .ndo_open = lpc_eth_open,
+ .ndo_stop = lpc_eth_close,
+ .ndo_start_xmit = lpc_eth_hard_start_xmit,
+ .ndo_set_rx_mode = lpc_eth_set_multicast_list,
+ .ndo_do_ioctl = lpc_eth_ioctl,
+ .ndo_set_mac_address = lpc_set_mac_address,
+};
+
+static int lpc_eth_drv_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct resource *dma_res;
+ struct net_device *ndev;
+ struct netdata_local *pldat;
+ struct phy_device *phydev;
+ dma_addr_t dma_handle;
+ int irq, ret;
+
+ /* Get platform resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ irq = platform_get_irq(pdev, 0);
+ if ((!res) || (!dma_res) || (irq < 0) || (irq >= NR_IRQS)) {
+ dev_err(&pdev->dev, "error getting resources.\n");
+ ret = -ENXIO;
+ goto err_exit;
+ }
+
+ /* Allocate net driver data structure */
+ ndev = alloc_etherdev(sizeof(struct netdata_local));
+ if (!ndev) {
+ dev_err(&pdev->dev, "could not allocate device.\n");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ pldat = netdev_priv(ndev);
+ pldat->pdev = pdev;
+ pldat->ndev = ndev;
+
+ spin_lock_init(&pldat->lock);
+
+ /* Save resources */
+ ndev->irq = irq;
+
+ /* Get clock for the device */
+ pldat->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pldat->clk)) {
+ dev_err(&pdev->dev, "error getting clock.\n");
+ ret = PTR_ERR(pldat->clk);
+ goto err_out_free_dev;
+ }
+
+ /* Enable network clock */
+ __lpc_eth_clock_enable(pldat, true);
+
+ /* Map IO space */
+ pldat->net_base = ioremap(res->start, res->end - res->start + 1);
+ if (!pldat->net_base) {
+ dev_err(&pdev->dev, "failed to map registers\n");
+ ret = -ENOMEM;
+ goto err_out_disable_clocks;
+ }
+ ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0,
+ ndev->name, ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "error requesting interrupt.\n");
+ goto err_out_iounmap;
+ }
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(ndev);
+
+ /* Setup driver functions */
+ ndev->netdev_ops = &lpc_netdev_ops;
+ ndev->ethtool_ops = &lpc_eth_ethtool_ops;
+ ndev->watchdog_timeo = msecs_to_jiffies(2500);
+
+ /* Get size of DMA buffers/descriptors region */
+ pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE +
+ sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t));
+ pldat->dma_buff_base_v = 0;
+
+ if (use_iram_for_net()) {
+ dma_handle = dma_res->start;
+ if (pldat->dma_buff_size <= lpc32xx_return_iram_size())
+ pldat->dma_buff_base_v =
+ io_p2v(dma_res->start);
+ else
+ netdev_err(ndev,
+ "IRAM not big enough for net buffers, using SDRAM instead.\n");
+ }
+
+ if (pldat->dma_buff_base_v == 0) {
+ pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);
+
+ /* Allocate a chunk of memory for the DMA ethernet buffers
+ and descriptors */
+ pldat->dma_buff_base_v =
+ dma_alloc_coherent(&pldat->pdev->dev,
+ pldat->dma_buff_size, &dma_handle,
+ GFP_KERNEL);
+
+ if (pldat->dma_buff_base_v == NULL) {
+ dev_err(&pdev->dev, "error getting DMA region.\n");
+ ret = -ENOMEM;
+ goto err_out_free_irq;
+ }
+ }
+ pldat->dma_buff_base_p = dma_handle;
+
+ netdev_dbg(ndev, "IO address start :0x%08x\n",
+ res->start);
+ netdev_dbg(ndev, "IO address size :%d\n",
+ res->end - res->start + 1);
+ netdev_err(ndev, "IO address (mapped) :0x%p\n",
+ pldat->net_base);
+ netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
+ netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
+ netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
+ pldat->dma_buff_base_p);
+ netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
+ pldat->dma_buff_base_v);
+
+ /* Get MAC address from current HW setting (POR state is all zeros) */
+ __lpc_get_mac(pldat, ndev->dev_addr);
+
+#ifdef CONFIG_OF_NET
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ const char *macaddr = of_get_mac_address(pdev->dev.of_node);
+ if (macaddr)
+ memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
+ }
+#endif
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ dev_hw_addr_random(ndev, ndev->dev_addr);
+
+ /* Reset the ethernet controller */
+ __lpc_eth_reset(pldat);
+
+ /* then shut everything down to save power */
+ __lpc_eth_shutdown(pldat);
+
+ /* Set default parameters */
+ pldat->msg_enable = NETIF_MSG_LINK;
+
+ /* Force an MII interface reset and clock setup */
+ __lpc_mii_mngt_reset(pldat);
+
+ /* Force default PHY interface setup in chip, this will probably be
+ changed by the PHY driver */
+ pldat->link = 0;
+ pldat->speed = 100;
+ pldat->duplex = DUPLEX_FULL;
+ __lpc_params_setup(pldat);
+
+ netif_napi_add(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_dma_unmap;
+ }
+ platform_set_drvdata(pdev, ndev);
+
+ if (lpc_mii_init(pldat) != 0)
+ goto err_out_unregister_netdev;
+
+ netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
+ res->start, ndev->irq);
+
+ phydev = pldat->phy_dev;
+
+ device_init_wakeup(&pdev->dev, 1);
+ device_set_wakeup_enable(&pdev->dev, 0);
+
+ return 0;
+
+err_out_unregister_netdev:
+ platform_set_drvdata(pdev, NULL);
+ unregister_netdev(ndev);
+err_out_dma_unmap:
+ if (!use_iram_for_net() ||
+ pldat->dma_buff_size > lpc32xx_return_iram_size())
+ dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
+ pldat->dma_buff_base_v,
+ pldat->dma_buff_base_p);
+err_out_free_irq:
+ free_irq(ndev->irq, ndev);
+err_out_iounmap:
+ iounmap(pldat->net_base);
+err_out_disable_clocks:
+ clk_disable(pldat->clk);
+ clk_put(pldat->clk);
+err_out_free_dev:
+ free_netdev(ndev);
+err_exit:
+ pr_err("%s: not found (%d).\n", MODNAME, ret);
+ return ret;
+}
+
+static int lpc_eth_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct netdata_local *pldat = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+ if (!use_iram_for_net() ||
+ pldat->dma_buff_size > lpc32xx_return_iram_size())
+ dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
+ pldat->dma_buff_base_v,
+ pldat->dma_buff_base_p);
+ free_irq(ndev->irq, ndev);
+ iounmap(pldat->net_base);
+ mdiobus_free(pldat->mii_bus);
+ clk_disable(pldat->clk);
+ clk_put(pldat->clk);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc_eth_drv_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct netdata_local *pldat = netdev_priv(ndev);
+
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(ndev->irq);
+
+ if (ndev) {
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ __lpc_eth_shutdown(pldat);
+ clk_disable(pldat->clk);
+
+ /*
+			 * Reset again now that the clock is disabled, to be
+			 * sure EMC_MDC is down
+			 */
+ __lpc_eth_reset(pldat);
+ }
+ }
+
+ return 0;
+}
+
+static int lpc_eth_drv_resume(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct netdata_local *pldat;
+
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(ndev->irq);
+
+ if (ndev) {
+ if (netif_running(ndev)) {
+ pldat = netdev_priv(ndev);
+
+ /* Enable interface clock */
+ clk_enable(pldat->clk);
+
+ /* Reset and initialize */
+ __lpc_eth_reset(pldat);
+ __lpc_eth_init(pldat);
+
+ netif_device_attach(ndev);
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static struct platform_driver lpc_eth_driver = {
+ .probe = lpc_eth_drv_probe,
+ .remove = __devexit_p(lpc_eth_drv_remove),
+#ifdef CONFIG_PM
+ .suspend = lpc_eth_drv_suspend,
+ .resume = lpc_eth_drv_resume,
+#endif
+ .driver = {
+ .name = MODNAME,
+ },
+};
+
+module_platform_driver(lpc_eth_driver);
+
+MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("LPC Ethernet Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 00bc4fc968c7..bce01641ee6b 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -20,3 +20,16 @@ config PCH_GBE
purpose use.
ML7223/ML7831 is companion chip for Intel Atom E6xx series.
ML7223/ML7831 is completely compatible for Intel EG20T PCH.
+
+if PCH_GBE
+
+config PCH_PTP
+ bool "PCH PTP clock support"
+ default n
+ depends on PTP_1588_CLOCK_PCH
+ ---help---
+ Say Y here if you want to use Precision Time Protocol (PTP) in the
+ driver. PTP is a method to precisely synchronize distributed clocks
+ over Ethernet networks.
+
+endif # PCH_GBE
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index a09a07197eb5..dd14915f54bb 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -630,6 +630,9 @@ struct pch_gbe_adapter {
unsigned long tx_queue_len;
bool have_msi;
bool rx_stop_flag;
+ int hwts_tx_en;
+ int hwts_rx_en;
+ struct pci_dev *ptp_pdev;
};
extern const char pch_driver_version[];
@@ -648,6 +651,16 @@ extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
struct pch_gbe_rx_ring *rx_ring);
extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
+#ifdef CONFIG_PCH_PTP
+extern u32 pch_ch_control_read(struct pci_dev *pdev);
+extern void pch_ch_control_write(struct pci_dev *pdev, u32 val);
+extern u32 pch_ch_event_read(struct pci_dev *pdev);
+extern void pch_ch_event_write(struct pci_dev *pdev, u32 val);
+extern u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
+extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
+extern u64 pch_rx_snap_read(struct pci_dev *pdev);
+extern u64 pch_tx_snap_read(struct pci_dev *pdev);
+#endif
/* pch_gbe_param.c */
extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 3ead111111e1..8035e5ff6e06 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2010 - 2012 LAPIS SEMICONDUCTOR CO., LTD.
*
* This code was derived from the Intel e1000e Linux driver.
*
@@ -21,6 +21,10 @@
#include "pch_gbe.h"
#include "pch_gbe_api.h"
#include <linux/module.h>
+#ifdef CONFIG_PCH_PTP
+#include <linux/net_tstamp.h>
+#include <linux/ptp_classify.h>
+#endif
#define DRV_VERSION "1.00"
const char pch_driver_version[] = DRV_VERSION;
@@ -95,12 +99,195 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_INT_DISABLE_ALL 0
+#ifdef CONFIG_PCH_PTP
+/* Macros for ieee1588 */
+#define TICKS_NS_SHIFT 5
+
+/* 0x40 Time Synchronization Channel Control Register Bits */
+#define MASTER_MODE (1<<0)
+#define SLAVE_MODE (0<<0)
+#define V2_MODE (1<<31)
+#define CAP_MODE0 (0<<16)
+#define CAP_MODE2 (1<<17)
+
+/* 0x44 Time Synchronization Channel Event Register Bits */
+#define TX_SNAPSHOT_LOCKED (1<<0)
+#define RX_SNAPSHOT_LOCKED (1<<1)
+#endif
+
static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
int data);
+#ifdef CONFIG_PCH_PTP
+static struct sock_filter ptp_filter[] = {
+ PTP_FILTER
+};
+
+static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
+{
+ u8 *data = skb->data;
+ unsigned int offset;
+ u16 *hi, *id;
+ u32 lo;
+
+ if ((sk_run_filter(skb, ptp_filter) != PTP_CLASS_V2_IPV4) &&
+ (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)) {
+ return 0;
+ }
+
+ offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+
+ if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
+ return 0;
+
+ hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
+ id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+
+ memcpy(&lo, &hi[1], sizeof(lo));
+
+ return (uid_hi == *hi &&
+ uid_lo == lo &&
+ seqid == *id);
+}
+
+static void pch_rx_timestamp(
+ struct pch_gbe_adapter *adapter, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct pci_dev *pdev;
+ u64 ns;
+ u32 hi, lo, val;
+ u16 uid, seq;
+
+ if (!adapter->hwts_rx_en)
+ return;
+
+ /* Get ieee1588's dev information */
+ pdev = adapter->ptp_pdev;
+
+ val = pch_ch_event_read(pdev);
+
+ if (!(val & RX_SNAPSHOT_LOCKED))
+ return;
+
+ lo = pch_src_uuid_lo_read(pdev);
+ hi = pch_src_uuid_hi_read(pdev);
+
+ uid = hi & 0xffff;
+ seq = (hi >> 16) & 0xffff;
+
+ if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
+ goto out;
+
+ ns = pch_rx_snap_read(pdev);
+ ns <<= TICKS_NS_SHIFT;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+out:
+ pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
+}
+
+static void pch_tx_timestamp(
+ struct pch_gbe_adapter *adapter, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct pci_dev *pdev;
+ struct skb_shared_info *shtx;
+ u64 ns;
+ u32 cnt, val;
+
+ shtx = skb_shinfo(skb);
+ if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en))
+ shtx->tx_flags |= SKBTX_IN_PROGRESS;
+ else
+ return;
+
+ /* Get ieee1588's dev information */
+ pdev = adapter->ptp_pdev;
+
+ /*
+ * This really stinks, but we have to poll for the Tx time stamp.
+ * Usually, the time stamp is ready after 4 to 6 microseconds.
+ */
+ for (cnt = 0; cnt < 100; cnt++) {
+ val = pch_ch_event_read(pdev);
+ if (val & TX_SNAPSHOT_LOCKED)
+ break;
+ udelay(1);
+ }
+ if (!(val & TX_SNAPSHOT_LOCKED)) {
+ shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
+ return;
+ }
+
+ ns = pch_tx_snap_read(pdev);
+ ns <<= TICKS_NS_SHIFT;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+
+ pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
+}
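
As a side note, pch_tx_timestamp() follows the generic TX timestamping protocol: the stack sets SKBTX_HW_TSTAMP on sockets that requested hardware stamps, the driver marks SKBTX_IN_PROGRESS before the frame hits the wire, and the captured time is returned through skb_tstamp_tx(). The delivery step on its own — example_deliver_tx_stamp() is illustrative and assumes the xmit path already set SKBTX_IN_PROGRESS:

#include <linux/skbuff.h>
#include <linux/ktime.h>

/* Hand a hardware TX timestamp (in nanoseconds) back to the socket. */
static void example_deliver_tx_stamp(struct sk_buff *skb, u64 ns)
{
	struct skb_shared_hwtstamps hwts;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return;

	memset(&hwts, 0, sizeof(hwts));
	hwts.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &hwts);
}
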
+
+static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config cfg;
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ if (cfg.flags) /* reserved for future extensions */
+ return -EINVAL;
+
+ /* Get ieee1588's dev information */
+ pdev = adapter->ptp_pdev;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ adapter->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ adapter->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ adapter->hwts_rx_en = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ adapter->hwts_rx_en = 0;
+ pch_ch_control_write(pdev, (SLAVE_MODE | CAP_MODE0));
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ adapter->hwts_rx_en = 1;
+ pch_ch_control_write(pdev, (MASTER_MODE | CAP_MODE0));
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ adapter->hwts_rx_en = 1;
+ pch_ch_control_write(pdev, (V2_MODE | CAP_MODE2));
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Clear out any old time stamps. */
+ pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+#endif
+
inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
@@ -1072,6 +1259,11 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
iowrite32(tx_ring->dma +
(int)sizeof(struct pch_gbe_tx_desc) * ring_num,
&hw->reg->TX_DSC_SW_P);
+
+#ifdef CONFIG_PCH_PTP
+ pch_tx_timestamp(adapter, skb);
+#endif
+
dev_kfree_skb_any(skb);
}
@@ -1224,7 +1416,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
/* When request status is Receive interruption */
if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
- (adapter->rx_stop_flag == true)) {
+ (adapter->rx_stop_flag)) {
if (likely(napi_schedule_prep(&adapter->napi))) {
/* Enable only Rx Descriptor empty */
atomic_inc(&adapter->irq_sem);
@@ -1543,6 +1735,11 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
adapter->stats.multicast++;
/* Write meta date of skb */
skb_put(skb, length);
+
+#ifdef CONFIG_PCH_PTP
+ pch_rx_timestamp(adapter, skb);
+#endif
+
skb->protocol = eth_type_trans(skb, netdev);
if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
skb->ip_summed = CHECKSUM_NONE;
@@ -1587,10 +1784,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
tx_ring->buffer_info = vzalloc(size);
- if (!tx_ring->buffer_info) {
- pr_err("Unable to allocate memory for the buffer information\n");
+ if (!tx_ring->buffer_info)
return -ENOMEM;
- }
tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
@@ -1636,10 +1831,9 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
rx_ring->buffer_info = vzalloc(size);
- if (!rx_ring->buffer_info) {
- pr_err("Unable to allocate memory for the receive descriptor ring\n");
+ if (!rx_ring->buffer_info)
return -ENOMEM;
- }
+
rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -2147,6 +2341,11 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
pr_debug("cmd : 0x%04x\n", cmd);
+#ifdef CONFIG_PCH_PTP
+ if (cmd == SIOCSHWTSTAMP)
+ return hwtstamp_ioctl(netdev, ifr, cmd);
+#endif
+
return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
}
@@ -2422,8 +2621,6 @@ static int pch_gbe_probe(struct pci_dev *pdev,
netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
if (!netdev) {
ret = -ENOMEM;
- dev_err(&pdev->dev,
- "ERR: Can't allocate and set up an Ethernet device\n");
goto err_release_pci;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -2440,6 +2637,15 @@ static int pch_gbe_probe(struct pci_dev *pdev,
goto err_free_netdev;
}
+#ifdef CONFIG_PCH_PTP
+ adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
+ PCI_DEVFN(12, 4));
+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
+ pr_err("Bad ptp filter\n");
+ ret = -EINVAL;
+ goto err_free_netdev;
+ }
+#endif
+
netdev->netdev_ops = &pch_gbe_netdev_ops;
netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
netif_napi_add(netdev, &adapter->napi,
@@ -2504,7 +2710,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
netif_carrier_off(netdev);
netif_stop_queue(netdev);
- dev_dbg(&pdev->dev, "OKIsemi(R) PCH Network Connection\n");
+ dev_dbg(&pdev->dev, "PCH Network Connection\n");
device_set_wakeup_enable(&pdev->dev, 1);
return 0;
@@ -2605,7 +2811,7 @@ module_init(pch_gbe_init_module);
module_exit(pch_gbe_exit_module);
MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
-MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
+MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
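The hwtstamp_ioctl() handler added above is reached through the standard SIOCSHWTSTAMP request issued on an ordinary socket. A minimal user-space sketch of driving it is shown below; it is not part of the patch, the interface name "eth0" is a placeholder, and the filter chosen (HWTSTAMP_FILTER_PTP_V1_L4_SYNC) is one of the values the pch_gbe handler accepts.

/* Sketch (assumption: run against an interface whose driver implements
 * SIOCSHWTSTAMP, e.g. pch_gbe built with CONFIG_PCH_PTP); "eth0" is a
 * placeholder interface name.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* The driver may rewrite cfg to reflect what it actually enabled. */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}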
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 3458df3780b8..0d29f5f4b8e4 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1188,11 +1188,10 @@ static void hamachi_init_ring(struct net_device *dev)
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
+ struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
hmp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
- skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* 16 byte align the IP header. */
hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
@@ -1488,7 +1487,7 @@ static int hamachi_rx(struct net_device *dev)
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
if (pkt_len < rx_copybreak &&
- (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
#ifdef RX_CHECKSUM
printk(KERN_ERR "%s: rx_copybreak non-zero "
"not good with RX_CHECKSUM\n", dev->name);
@@ -1591,12 +1590,11 @@ static int hamachi_rx(struct net_device *dev)
entry = hmp->dirty_rx % RX_RING_SIZE;
desc = &(hmp->rx_ring[entry]);
if (hmp->rx_skbuff[entry] == NULL) {
- struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
+ struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
hmp->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
- skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index db44e9af03c3..7757b80ef924 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -397,10 +397,9 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
if (i) return i;
dev = alloc_etherdev(sizeof(*np));
- if (!dev) {
- pr_err("cannot allocate ethernet device\n");
+ if (!dev)
return -ENOMEM;
- }
+
SET_NETDEV_DEV(dev, &pdev->dev);
np = netdev_priv(dev);
@@ -744,11 +743,10 @@ static int yellowfin_init_ring(struct net_device *dev)
}
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
+ struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
yp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
- skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* 16 byte align the IP header. */
yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
@@ -1134,7 +1132,7 @@ static int yellowfin_rx(struct net_device *dev)
PCI_DMA_FROMDEVICE);
yp->rx_skbuff[entry] = NULL;
} else {
- skb = dev_alloc_skb(pkt_len + 2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header */
@@ -1157,11 +1155,10 @@ static int yellowfin_rx(struct net_device *dev)
for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
entry = yp->dirty_rx % RX_RING_SIZE;
if (yp->rx_skbuff[entry] == NULL) {
- struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
+ struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
if (skb == NULL)
break; /* Better luck next round. */
yp->rx_skbuff[entry] = skb;
- skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
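The hamachi and yellowfin hunks above (and the netxen_alloc_rx_skb() change later in this series) are instances of the same mechanical conversion: dev_alloc_skb() plus a manual skb->dev assignment becomes a single netdev_alloc_skb() call, which associates the skb with the device itself. A minimal sketch of the resulting refill pattern follows; the helper name and parameters are illustrative, not taken from either driver.

/* Illustrative RX refill helper (kernel context, hypothetical name):
 * netdev_alloc_skb() already ties the skb to @dev, so the old explicit
 * "skb->dev = dev" assignment is no longer needed.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_refill_rx_buf(struct net_device *dev,
					     unsigned int buf_sz)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, buf_sz + 2);

	if (!skb)
		return NULL;	/* caller retries on a later refill pass */
	skb_reserve(skb, 2);	/* 16-byte align the IP header */
	return skb;
}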
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 49b549ff2c78..ddc95b0ac78d 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -238,7 +238,7 @@ static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
unsigned int adr0, adr1;
if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
@@ -643,7 +643,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
/* Entry in use? */
WARN_ON(*buff);
- skb = dev_alloc_skb(mac->bufsz);
+ skb = netdev_alloc_skb(dev, mac->bufsz);
skb_reserve(skb, LOCAL_SKB_ALIGN);
if (unlikely(!skb))
@@ -1740,8 +1740,6 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev = alloc_etherdev(sizeof(struct pasemi_mac));
if (dev == NULL) {
- dev_err(&pdev->dev,
- "pasemi_mac: Could not allocate ethernet device.\n");
err = -ENOMEM;
goto out_disable_device;
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index a876dffd7101..b5de8a7b90f1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 77
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.77"
+#define _NETXEN_NIC_LINUX_SUBVERSION 78
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.78"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -686,6 +686,18 @@ struct netxen_recv_context {
dma_addr_t phys_addr;
};
+struct _cdrp_cmd {
+ u32 cmd;
+ u32 arg1;
+ u32 arg2;
+ u32 arg3;
+};
+
+struct netxen_cmd_args {
+ struct _cdrp_cmd req;
+ struct _cdrp_cmd rsp;
+};
+
/* New HW context creation */
#define NX_OS_CRB_RETRY_COUNT 4000
@@ -942,7 +954,7 @@ typedef struct nx_mac_list_s {
struct nx_vlan_ip_list {
struct list_head list;
- u32 ip_addr;
+ __be32 ip_addr;
};
/*
@@ -1142,6 +1154,7 @@ typedef struct {
#define NETXEN_NIC_LRO_DISABLED 0x00
#define NETXEN_NIC_BRIDGE_ENABLED 0X10
#define NETXEN_NIC_DIAG_ENABLED 0x20
+#define NETXEN_FW_RESET_OWNER 0x40
#define NETXEN_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
@@ -1159,6 +1172,419 @@ typedef struct {
#define __NX_DEV_UP 1
#define __NX_RESETTING 2
+/* Mini Coredump FW supported version */
+#define NX_MD_SUPPORT_MAJOR 4
+#define NX_MD_SUPPORT_MINOR 0
+#define NX_MD_SUPPORT_SUBVERSION 579
+
+#define LSW(x) ((uint16_t)(x))
+#define LSD(x) ((uint32_t)((uint64_t)(x)))
+#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
+
+/* Mini Coredump mask level */
+#define NX_DUMP_MASK_MIN 0x03
+#define NX_DUMP_MASK_DEF 0x1f
+#define NX_DUMP_MASK_MAX 0xff
+
+/* Mini Coredump CDRP commands */
+#define NX_CDRP_CMD_TEMP_SIZE 0x0000002f
+#define NX_CDRP_CMD_GET_TEMP_HDR 0x00000030
+
+
+#define NX_DUMP_STATE_ARRAY_LEN 16
+#define NX_DUMP_CAP_SIZE_ARRAY_LEN 8
+
+/* Mini Coredump sysfs entries flags*/
+#define NX_FORCE_FW_DUMP_KEY 0xdeadfeed
+#define NX_ENABLE_FW_DUMP 0xaddfeed
+#define NX_DISABLE_FW_DUMP 0xbadfeed
+#define NX_FORCE_FW_RESET 0xdeaddead
+
+
+/* Flash read/write address */
+#define NX_FW_DUMP_REG1 0x00130060
+#define NX_FW_DUMP_REG2 0x001e0000
+#define NX_FLASH_SEM2_LK 0x0013C010
+#define NX_FLASH_SEM2_ULK 0x0013C014
+#define NX_FLASH_LOCK_ID 0x001B2100
+#define FLASH_ROM_WINDOW 0x42110030
+#define FLASH_ROM_DATA 0x42150000
+
+/* Mini Coredump register read/write routine */
+#define NX_RD_DUMP_REG(addr, bar0, data) do { \
+ writel((addr & 0xFFFF0000), (void __iomem *) (bar0 + \
+ NX_FW_DUMP_REG1)); \
+ readl((void __iomem *) (bar0 + NX_FW_DUMP_REG1)); \
+ *data = readl((void __iomem *) (bar0 + NX_FW_DUMP_REG2 + \
+ LSW(addr))); \
+} while (0)
+
+#define NX_WR_DUMP_REG(addr, bar0, data) do { \
+ writel((addr & 0xFFFF0000), (void __iomem *) (bar0 + \
+ NX_FW_DUMP_REG1)); \
+ readl((void __iomem *) (bar0 + NX_FW_DUMP_REG1)); \
+ writel(data, (void __iomem *) (bar0 + NX_FW_DUMP_REG2 + LSW(addr)));\
+ readl((void __iomem *) (bar0 + NX_FW_DUMP_REG2 + LSW(addr))); \
+} while (0)
+
+
+/*
+Entry Type Defines
+*/
+
+#define RDNOP 0
+#define RDCRB 1
+#define RDMUX 2
+#define QUEUE 3
+#define BOARD 4
+#define RDSRE 5
+#define RDOCM 6
+#define PREGS 7
+#define L1DTG 8
+#define L1ITG 9
+#define CACHE 10
+
+#define L1DAT 11
+#define L1INS 12
+#define RDSTK 13
+#define RDCON 14
+
+#define L2DTG 21
+#define L2ITG 22
+#define L2DAT 23
+#define L2INS 24
+#define RDOC3 25
+
+#define MEMBK 32
+
+#define RDROM 71
+#define RDMEM 72
+#define RDMN 73
+
+#define INFOR 81
+#define CNTRL 98
+
+#define TLHDR 99
+#define RDEND 255
+
+#define PRIMQ 103
+#define SQG2Q 104
+#define SQG3Q 105
+
+/*
+* Opcodes for Control Entries.
+* These Flags are bit fields.
+*/
+#define NX_DUMP_WCRB 0x01
+#define NX_DUMP_RWCRB 0x02
+#define NX_DUMP_ANDCRB 0x04
+#define NX_DUMP_ORCRB 0x08
+#define NX_DUMP_POLLCRB 0x10
+#define NX_DUMP_RD_SAVE 0x20
+#define NX_DUMP_WRT_SAVED 0x40
+#define NX_DUMP_MOD_SAVE_ST 0x80
+
+/* Driver Flags */
+#define NX_DUMP_SKIP 0x80 /* driver skipped this entry */
+#define NX_DUMP_SIZE_ERR 0x40 /*entry size vs capture size mismatch*/
+
+#define NX_PCI_READ_32(ADDR) readl((ADDR))
+#define NX_PCI_WRITE_32(DATA, ADDR) writel(DATA, (ADDR))
+
+
+
+struct netxen_minidump {
+ u32 pos; /* position in the dump buffer */
+ u8 fw_supports_md; /* FW supports Mini cordump */
+ u8 has_valid_dump; /* indicates valid dump */
+ u8 md_capture_mask; /* driver capture mask */
+ u8 md_enabled; /* Turn Mini Coredump on/off */
+ u32 md_dump_size; /* Total FW Mini Coredump size */
+ u32 md_capture_size; /* FW dump capture size */
+ u32 md_template_size; /* FW template size */
+ u32 md_template_ver; /* FW template version */
+ u64 md_timestamp; /* FW Mini dump timestamp */
+ void *md_template; /* FW template will be stored */
+ void *md_capture_buff; /* FW dump will be stored */
+};
+
+
+
+struct netxen_minidump_template_hdr {
+ u32 entry_type;
+ u32 first_entry_offset;
+ u32 size_of_template;
+ u32 capture_mask;
+ u32 num_of_entries;
+ u32 version;
+ u32 driver_timestamp;
+ u32 checksum;
+ u32 driver_capture_mask;
+ u32 driver_info_word2;
+ u32 driver_info_word3;
+ u32 driver_info_word4;
+ u32 saved_state_array[NX_DUMP_STATE_ARRAY_LEN];
+ u32 capture_size_array[NX_DUMP_CAP_SIZE_ARRAY_LEN];
+ u32 rsvd[0];
+};
+
+/* Common Entry Header: Common to All Entry Types */
+/*
+ * Driver Code is for driver to write some info about the entry.
+ * Currently not used.
+ */
+
+struct netxen_common_entry_hdr {
+ u32 entry_type;
+ u32 entry_size;
+ u32 entry_capture_size;
+ union {
+ struct {
+ u8 entry_capture_mask;
+ u8 entry_code;
+ u8 driver_code;
+ u8 driver_flags;
+ };
+ u32 entry_ctrl_word;
+ };
+};
+
+
+/* Generic Entry Including Header */
+struct netxen_minidump_entry {
+ struct netxen_common_entry_hdr hdr;
+ u32 entry_data00;
+ u32 entry_data01;
+ u32 entry_data02;
+ u32 entry_data03;
+ u32 entry_data04;
+ u32 entry_data05;
+ u32 entry_data06;
+ u32 entry_data07;
+};
+
+/* Read ROM Header */
+struct netxen_minidump_entry_rdrom {
+ struct netxen_common_entry_hdr h;
+ union {
+ struct {
+ u32 select_addr_reg;
+ };
+ u32 rsvd_0;
+ };
+ union {
+ struct {
+ u8 addr_stride;
+ u8 addr_cnt;
+ u16 data_size;
+ };
+ u32 rsvd_1;
+ };
+ union {
+ struct {
+ u32 op_count;
+ };
+ u32 rsvd_2;
+ };
+ union {
+ struct {
+ u32 read_addr_reg;
+ };
+ u32 rsvd_3;
+ };
+ union {
+ struct {
+ u32 write_mask;
+ };
+ u32 rsvd_4;
+ };
+ union {
+ struct {
+ u32 read_mask;
+ };
+ u32 rsvd_5;
+ };
+ u32 read_addr;
+ u32 read_data_size;
+};
+
+
+/* Read CRB and Control Entry Header */
+struct netxen_minidump_entry_crb {
+ struct netxen_common_entry_hdr h;
+ u32 addr;
+ union {
+ struct {
+ u8 addr_stride;
+ u8 state_index_a;
+ u16 poll_timeout;
+ };
+ u32 addr_cntrl;
+ };
+ u32 data_size;
+ u32 op_count;
+ union {
+ struct {
+ u8 opcode;
+ u8 state_index_v;
+ u8 shl;
+ u8 shr;
+ };
+ u32 control_value;
+ };
+ u32 value_1;
+ u32 value_2;
+ u32 value_3;
+};
+
+/* Read Memory and MN Header */
+struct netxen_minidump_entry_rdmem {
+ struct netxen_common_entry_hdr h;
+ union {
+ struct {
+ u32 select_addr_reg;
+ };
+ u32 rsvd_0;
+ };
+ union {
+ struct {
+ u8 addr_stride;
+ u8 addr_cnt;
+ u16 data_size;
+ };
+ u32 rsvd_1;
+ };
+ union {
+ struct {
+ u32 op_count;
+ };
+ u32 rsvd_2;
+ };
+ union {
+ struct {
+ u32 read_addr_reg;
+ };
+ u32 rsvd_3;
+ };
+ union {
+ struct {
+ u32 cntrl_addr_reg;
+ };
+ u32 rsvd_4;
+ };
+ union {
+ struct {
+ u8 wr_byte0;
+ u8 wr_byte1;
+ u8 poll_mask;
+ u8 poll_cnt;
+ };
+ u32 rsvd_5;
+ };
+ u32 read_addr;
+ u32 read_data_size;
+};
+
+/* Read Cache L1 and L2 Header */
+struct netxen_minidump_entry_cache {
+ struct netxen_common_entry_hdr h;
+ u32 tag_reg_addr;
+ union {
+ struct {
+ u16 tag_value_stride;
+ u16 init_tag_value;
+ };
+ u32 select_addr_cntrl;
+ };
+ u32 data_size;
+ u32 op_count;
+ u32 control_addr;
+ union {
+ struct {
+ u16 write_value;
+ u8 poll_mask;
+ u8 poll_wait;
+ };
+ u32 control_value;
+ };
+ u32 read_addr;
+ union {
+ struct {
+ u8 read_addr_stride;
+ u8 read_addr_cnt;
+ u16 rsvd_1;
+ };
+ u32 read_addr_cntrl;
+ };
+};
+
+/* Read OCM Header */
+struct netxen_minidump_entry_rdocm {
+ struct netxen_common_entry_hdr h;
+ u32 rsvd_0;
+ union {
+ struct {
+ u32 rsvd_1;
+ };
+ u32 select_addr_cntrl;
+ };
+ u32 data_size;
+ u32 op_count;
+ u32 rsvd_2;
+ u32 rsvd_3;
+ u32 read_addr;
+ union {
+ struct {
+ u32 read_addr_stride;
+ };
+ u32 read_addr_cntrl;
+ };
+};
+
+/* Read MUX Header */
+struct netxen_minidump_entry_mux {
+ struct netxen_common_entry_hdr h;
+ u32 select_addr;
+ union {
+ struct {
+ u32 rsvd_0;
+ };
+ u32 select_addr_cntrl;
+ };
+ u32 data_size;
+ u32 op_count;
+ u32 select_value;
+ u32 select_value_stride;
+ u32 read_addr;
+ u32 rsvd_1;
+};
+
+/* Read Queue Header */
+struct netxen_minidump_entry_queue {
+ struct netxen_common_entry_hdr h;
+ u32 select_addr;
+ union {
+ struct {
+ u16 queue_id_stride;
+ u16 rsvd_0;
+ };
+ u32 select_addr_cntrl;
+ };
+ u32 data_size;
+ u32 op_count;
+ u32 rsvd_1;
+ u32 rsvd_2;
+ u32 read_addr;
+ union {
+ struct {
+ u8 read_addr_stride;
+ u8 read_addr_cnt;
+ u16 rsvd_3;
+ };
+ u32 read_addr_cntrl;
+ };
+};
+
struct netxen_dummy_dma {
void *addr;
dma_addr_t phys_addr;
@@ -1263,6 +1689,8 @@ struct netxen_adapter {
__le32 file_prd_off; /*File fw product offset*/
u32 fw_version;
const struct firmware *fw;
+ struct netxen_minidump mdump; /* mdump ptr */
+ int fw_mdump_rdy; /* for mdump ready */
};
int nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val);
@@ -1352,7 +1780,7 @@ int netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max);
void netxen_p3_free_mac_list(struct netxen_adapter *adapter);
int netxen_config_intr_coalesce(struct netxen_adapter *adapter);
int netxen_config_rss(struct netxen_adapter *adapter, int enable);
-int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd);
+int netxen_config_ipaddr(struct netxen_adapter *adapter, __be32 ip, int cmd);
int netxen_linkevent_request(struct netxen_adapter *adapter, int enable);
void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup);
void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *);
@@ -1365,13 +1793,16 @@ int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable);
int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable);
int netxen_send_lro_cleanup(struct netxen_adapter *adapter);
-
+int netxen_setup_minidump(struct netxen_adapter *adapter);
+void netxen_dump_fw(struct netxen_adapter *adapter);
void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
struct nx_host_tx_ring *tx_ring);
/* Functions from netxen_nic_main.c */
int netxen_nic_reset_context(struct netxen_adapter *);
+int nx_dev_request_reset(struct netxen_adapter *adapter);
+
/*
* NetXen Board information
*/
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index a925392abd6f..f3c0057a802b 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -48,28 +48,27 @@ netxen_poll_rsp(struct netxen_adapter *adapter)
}
static u32
-netxen_issue_cmd(struct netxen_adapter *adapter,
- u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
+netxen_issue_cmd(struct netxen_adapter *adapter, struct netxen_cmd_args *cmd)
{
u32 rsp;
u32 signature = 0;
u32 rcode = NX_RCODE_SUCCESS;
- signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version);
-
+ signature = NX_CDRP_SIGNATURE_MAKE(adapter->ahw.pci_func,
+ NXHAL_VERSION);
/* Acquire semaphore before accessing CRB */
if (netxen_api_lock(adapter))
return NX_RCODE_TIMEOUT;
NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
- NXWR32(adapter, NX_ARG1_CRB_OFFSET, arg1);
+ NXWR32(adapter, NX_ARG1_CRB_OFFSET, cmd->req.arg1);
- NXWR32(adapter, NX_ARG2_CRB_OFFSET, arg2);
+ NXWR32(adapter, NX_ARG2_CRB_OFFSET, cmd->req.arg2);
- NXWR32(adapter, NX_ARG3_CRB_OFFSET, arg3);
+ NXWR32(adapter, NX_ARG3_CRB_OFFSET, cmd->req.arg3);
- NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd));
+ NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd->req.cmd));
rsp = netxen_poll_rsp(adapter);
@@ -83,28 +82,179 @@ netxen_issue_cmd(struct netxen_adapter *adapter,
printk(KERN_ERR "%s: failed card response code:0x%x\n",
netxen_nic_driver_name, rcode);
+ } else if (rsp == NX_CDRP_RSP_OK) {
+ cmd->rsp.cmd = NX_RCODE_SUCCESS;
+ if (cmd->rsp.arg2)
+ cmd->rsp.arg2 = NXRD32(adapter, NX_ARG2_CRB_OFFSET);
+ if (cmd->rsp.arg3)
+ cmd->rsp.arg3 = NXRD32(adapter, NX_ARG3_CRB_OFFSET);
}
+ if (cmd->rsp.arg1)
+ cmd->rsp.arg1 = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
/* Release semaphore */
netxen_api_unlock(adapter);
return rcode;
}
+static int
+netxen_get_minidump_template_size(struct netxen_adapter *adapter)
+{
+ struct netxen_cmd_args cmd;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = NX_CDRP_CMD_TEMP_SIZE;
+ memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
+ netxen_issue_cmd(adapter, &cmd);
+ if (cmd.rsp.cmd != NX_RCODE_SUCCESS) {
+ dev_info(&adapter->pdev->dev,
+ "Can't get template size %d\n", cmd.rsp.cmd);
+ return -EIO;
+ }
+ adapter->mdump.md_template_size = cmd.rsp.arg2;
+ adapter->mdump.md_template_ver = cmd.rsp.arg3;
+ return 0;
+}
+
+static int
+netxen_get_minidump_template(struct netxen_adapter *adapter)
+{
+ dma_addr_t md_template_addr;
+ void *addr;
+ u32 size;
+ struct netxen_cmd_args cmd;
+ size = adapter->mdump.md_template_size;
+
+ if (size == 0) {
+ dev_err(&adapter->pdev->dev, "Can not capture Minidump "
+ "template. Invalid template size.\n");
+ return NX_RCODE_INVALID_ARGS;
+ }
+
+ addr = pci_alloc_consistent(adapter->pdev, size, &md_template_addr);
+
+ if (!addr) {
+ dev_err(&adapter->pdev->dev, "Unable to allocate dmable memory for template.\n");
+ return -ENOMEM;
+ }
+
+ memset(addr, 0, size);
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
+ cmd.req.cmd = NX_CDRP_CMD_GET_TEMP_HDR;
+ cmd.req.arg1 = LSD(md_template_addr);
+ cmd.req.arg2 = MSD(md_template_addr);
+ cmd.req.arg3 |= size;
+ netxen_issue_cmd(adapter, &cmd);
+
+ if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) {
+ memcpy(adapter->mdump.md_template, addr, size);
+ } else {
+ dev_err(&adapter->pdev->dev, "Failed to get minidump template, "
+ "err_code : %d, requested_size : %d, actual_size : %d\n ",
+ cmd.rsp.cmd, size, cmd.rsp.arg2);
+ }
+ pci_free_consistent(adapter->pdev, size, addr, md_template_addr);
+ return 0;
+}
+
+static u32
+netxen_check_template_checksum(struct netxen_adapter *adapter)
+{
+ u64 sum = 0;
+ u32 *buff = adapter->mdump.md_template;
+ int count = adapter->mdump.md_template_size / sizeof(uint32_t);
+
+ while (count-- > 0)
+ sum += *buff++;
+ while (sum >> 32)
+ sum = (sum & 0xFFFFFFFF) + (sum >> 32);
+
+ return ~sum;
+}
+
+int
+netxen_setup_minidump(struct netxen_adapter *adapter)
+{
+ int err = 0, i;
+ u32 *template, *tmp_buf;
+ struct netxen_minidump_template_hdr *hdr;
+ err = netxen_get_minidump_template_size(adapter);
+ if (err) {
+ adapter->mdump.fw_supports_md = 0;
+ if ((err == NX_RCODE_CMD_INVALID) ||
+ (err == NX_RCODE_CMD_NOT_IMPL)) {
+ dev_info(&adapter->pdev->dev,
+ "Flashed firmware version does not support minidump, "
+ "minimum version required is [ %u.%u.%u ].\n ",
+ NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR,
+ NX_MD_SUPPORT_SUBVERSION);
+ }
+ return err;
+ }
+
+ if (!adapter->mdump.md_template_size) {
+ dev_err(&adapter->pdev->dev, "Error : Invalid template size "
+ ",should be non-zero.\n");
+ return -EIO;
+ }
+ adapter->mdump.md_template =
+ kmalloc(adapter->mdump.md_template_size, GFP_KERNEL);
+
+ if (!adapter->mdump.md_template) {
+ dev_err(&adapter->pdev->dev, "Unable to allocate memory "
+ "for minidump template.\n");
+ return -ENOMEM;
+ }
+
+ err = netxen_get_minidump_template(adapter);
+ if (err) {
+ if (err == NX_RCODE_CMD_NOT_IMPL)
+ adapter->mdump.fw_supports_md = 0;
+ goto free_template;
+ }
+
+ if (netxen_check_template_checksum(adapter)) {
+ dev_err(&adapter->pdev->dev, "Minidump template checksum Error\n");
+ err = -EIO;
+ goto free_template;
+ }
+
+ adapter->mdump.md_capture_mask = NX_DUMP_MASK_DEF;
+ tmp_buf = (u32 *) adapter->mdump.md_template;
+ template = (u32 *) adapter->mdump.md_template;
+ for (i = 0; i < adapter->mdump.md_template_size/sizeof(u32); i++)
+ *template++ = __le32_to_cpu(*tmp_buf++);
+ hdr = (struct netxen_minidump_template_hdr *)
+ adapter->mdump.md_template;
+ adapter->mdump.md_capture_buff = NULL;
+ adapter->mdump.fw_supports_md = 1;
+ adapter->mdump.md_enabled = 1;
+
+ return err;
+
+free_template:
+ kfree(adapter->mdump.md_template);
+ adapter->mdump.md_template = NULL;
+ return err;
+}
+
+
int
nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
{
u32 rcode = NX_RCODE_SUCCESS;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = NX_CDRP_CMD_SET_MTU;
+ cmd.req.arg1 = recv_ctx->context_id;
+ cmd.req.arg2 = mtu;
+ cmd.req.arg3 = 0;
if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
- rcode = netxen_issue_cmd(adapter,
- adapter->ahw.pci_func,
- NXHAL_VERSION,
- recv_ctx->context_id,
- mtu,
- 0,
- NX_CDRP_CMD_SET_MTU);
+ rcode = netxen_issue_cmd(adapter, &cmd);
if (rcode != NX_RCODE_SUCCESS)
return -EIO;
@@ -116,15 +266,14 @@ int
nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
u32 speed, u32 duplex, u32 autoneg)
{
-
- return netxen_issue_cmd(adapter,
- adapter->ahw.pci_func,
- NXHAL_VERSION,
- speed,
- duplex,
- autoneg,
- NX_CDRP_CMD_CONFIG_GBE_PORT);
-
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = NX_CDRP_CMD_CONFIG_GBE_PORT;
+ cmd.req.arg1 = speed;
+ cmd.req.arg2 = duplex;
+ cmd.req.arg3 = autoneg;
+ return netxen_issue_cmd(adapter, &cmd);
}
static int
@@ -139,6 +288,7 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
nx_cardrsp_sds_ring_t *prsp_sds;
struct nx_host_rds_ring *rds_ring;
struct nx_host_sds_ring *sds_ring;
+ struct netxen_cmd_args cmd;
dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
u64 phys_addr;
@@ -218,13 +368,12 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
}
phys_addr = hostrq_phys_addr;
- err = netxen_issue_cmd(adapter,
- adapter->ahw.pci_func,
- NXHAL_VERSION,
- (u32)(phys_addr >> 32),
- (u32)(phys_addr & 0xffffffff),
- rq_size,
- NX_CDRP_CMD_CREATE_RX_CTX);
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = (u32)(phys_addr >> 32);
+ cmd.req.arg2 = (u32)(phys_addr & 0xffffffff);
+ cmd.req.arg3 = rq_size;
+ cmd.req.cmd = NX_CDRP_CMD_CREATE_RX_CTX;
+ err = netxen_issue_cmd(adapter, &cmd);
if (err) {
printk(KERN_WARNING
"Failed to create rx ctx in firmware%d\n", err);
@@ -273,15 +422,15 @@ static void
nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
{
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct netxen_cmd_args cmd;
- if (netxen_issue_cmd(adapter,
- adapter->ahw.pci_func,
- NXHAL_VERSION,
- recv_ctx->context_id,
- NX_DESTROY_CTX_RESET,
- 0,
- NX_CDRP_CMD_DESTROY_RX_CTX)) {
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = recv_ctx->context_id;
+ cmd.req.arg2 = NX_DESTROY_CTX_RESET;
+ cmd.req.arg3 = 0;
+ cmd.req.cmd = NX_CDRP_CMD_DESTROY_RX_CTX;
+ if (netxen_issue_cmd(adapter, &cmd)) {
printk(KERN_WARNING
"%s: Failed to destroy rx ctx in firmware\n",
netxen_nic_driver_name);
@@ -302,6 +451,7 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
dma_addr_t rq_phys_addr, rsp_phys_addr;
struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct netxen_cmd_args cmd;
rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
rq_addr = pci_alloc_consistent(adapter->pdev,
@@ -345,13 +495,12 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
phys_addr = rq_phys_addr;
- err = netxen_issue_cmd(adapter,
- adapter->ahw.pci_func,
- NXHAL_VERSION,
- (u32)(phys_addr >> 32),
- ((u32)phys_addr & 0xffffffff),
- rq_size,
- NX_CDRP_CMD_CREATE_TX_CTX);
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = (u32)(phys_addr >> 32);
+ cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
+ cmd.req.arg3 = rq_size;
+ cmd.req.cmd = NX_CDRP_CMD_CREATE_TX_CTX;
+ err = netxen_issue_cmd(adapter, &cmd);
if (err == NX_RCODE_SUCCESS) {
temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
@@ -380,14 +529,14 @@ out_free_rq:
static void
nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
{
- if (netxen_issue_cmd(adapter,
- adapter->ahw.pci_func,
- NXHAL_VERSION,
- adapter->tx_context_id,
- NX_DESTROY_CTX_RESET,
- 0,
- NX_CDRP_CMD_DESTROY_TX_CTX)) {
-
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = adapter->tx_context_id;
+ cmd.req.arg2 = NX_DESTROY_CTX_RESET;
+ cmd.req.arg3 = 0;
+ cmd.req.cmd = NX_CDRP_CMD_DESTROY_TX_CTX;
+ if (netxen_issue_cmd(adapter, &cmd)) {
printk(KERN_WARNING
"%s: Failed to destroy tx ctx in firmware\n",
netxen_nic_driver_name);
@@ -398,34 +547,37 @@ int
nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val)
{
u32 rcode;
-
- rcode = netxen_issue_cmd(adapter,
- adapter->ahw.pci_func,
- NXHAL_VERSION,
- reg,
- 0,
- 0,
- NX_CDRP_CMD_READ_PHY);
-
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = reg;
+ cmd.req.arg2 = 0;
+ cmd.req.arg3 = 0;
+ cmd.req.cmd = NX_CDRP_CMD_READ_PHY;
+ cmd.rsp.arg1 = 1;
+ rcode = netxen_issue_cmd(adapter, &cmd);
if (rcode != NX_RCODE_SUCCESS)
return -EIO;
- return NXRD32(adapter, NX_ARG1_CRB_OFFSET);
+ if (val == NULL)
+ return -EIO;
+
+ *val = cmd.rsp.arg1;
+ return 0;
}
int
nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val)
{
u32 rcode;
-
- rcode = netxen_issue_cmd(adapter,
- adapter->ahw.pci_func,
- NXHAL_VERSION,
- reg,
- val,
- 0,
- NX_CDRP_CMD_WRITE_PHY);
-
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = reg;
+ cmd.req.arg2 = val;
+ cmd.req.arg3 = 0;
+ cmd.req.cmd = NX_CDRP_CMD_WRITE_PHY;
+ rcode = netxen_issue_cmd(adapter, &cmd);
if (rcode != NX_RCODE_SUCCESS)
return -EIO;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 8a371985319f..8c39299331a2 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -248,6 +248,11 @@ skip:
}
}
+ if (!netif_running(dev) || !adapter->ahw.linkup) {
+ ecmd->duplex = DUPLEX_UNKNOWN;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ }
+
return 0;
}
@@ -812,6 +817,107 @@ static int netxen_get_intr_coalesce(struct net_device *netdev,
return 0;
}
+static int
+netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netxen_minidump *mdump = &adapter->mdump;
+ if (adapter->fw_mdump_rdy)
+ dump->len = mdump->md_dump_size;
+ else
+ dump->len = 0;
+ dump->flag = mdump->md_capture_mask;
+ dump->version = adapter->fw_version;
+ return 0;
+}
+
+static int
+netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
+{
+ int ret = 0;
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netxen_minidump *mdump = &adapter->mdump;
+
+ switch (val->flag) {
+ case NX_FORCE_FW_DUMP_KEY:
+ if (!mdump->md_enabled)
+ mdump->md_enabled = 1;
+ if (adapter->fw_mdump_rdy) {
+ netdev_info(netdev, "Previous dump not cleared, not forcing dump\n");
+ return ret;
+ }
+ netdev_info(netdev, "Forcing a fw dump\n");
+ nx_dev_request_reset(adapter);
+ break;
+ case NX_DISABLE_FW_DUMP:
+ if (mdump->md_enabled) {
+ netdev_info(netdev, "Disabling FW Dump\n");
+ mdump->md_enabled = 0;
+ }
+ break;
+ case NX_ENABLE_FW_DUMP:
+ if (!mdump->md_enabled) {
+ netdev_info(netdev, "Enabling FW dump\n");
+ mdump->md_enabled = 1;
+ }
+ break;
+ case NX_FORCE_FW_RESET:
+ netdev_info(netdev, "Forcing FW reset\n");
+ nx_dev_request_reset(adapter);
+ adapter->flags &= ~NETXEN_FW_RESET_OWNER;
+ break;
+ default:
+ if (val->flag <= NX_DUMP_MASK_MAX &&
+ val->flag >= NX_DUMP_MASK_MIN) {
+ mdump->md_capture_mask = val->flag & 0xff;
+ netdev_info(netdev, "Driver mask changed to: 0x%x\n",
+ mdump->md_capture_mask);
+ break;
+ }
+ netdev_info(netdev,
+ "Invalid dump level: 0x%x\n", val->flag);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int
+netxen_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
+ void *buffer)
+{
+ int i, copy_sz;
+ u32 *hdr_ptr, *data;
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netxen_minidump *mdump = &adapter->mdump;
+
+
+ if (!adapter->fw_mdump_rdy) {
+ netdev_info(netdev, "Dump not available\n");
+ return -EINVAL;
+ }
+ /* Copy template header first */
+ copy_sz = mdump->md_template_size;
+ hdr_ptr = (u32 *) mdump->md_template;
+ data = buffer;
+ for (i = 0; i < copy_sz/sizeof(u32); i++)
+ *data++ = cpu_to_le32(*hdr_ptr++);
+
+ /* Copy captured dump data */
+ memcpy(buffer + copy_sz,
+ mdump->md_capture_buff + mdump->md_template_size,
+ mdump->md_capture_size);
+ dump->len = copy_sz + mdump->md_capture_size;
+ dump->flag = mdump->md_capture_mask;
+
+ /* Free dump area once data has been captured */
+ vfree(mdump->md_capture_buff);
+ mdump->md_capture_buff = NULL;
+ adapter->fw_mdump_rdy = 0;
+ netdev_info(netdev, "extracted the fw dump Successfully\n");
+ return 0;
+}
+
const struct ethtool_ops netxen_nic_ethtool_ops = {
.get_settings = netxen_nic_get_settings,
.set_settings = netxen_nic_set_settings,
@@ -833,4 +939,7 @@ const struct ethtool_ops netxen_nic_ethtool_ops = {
.get_sset_count = netxen_get_sset_count,
.get_coalesce = netxen_get_intr_coalesce,
.set_coalesce = netxen_set_intr_coalesce,
+ .get_dump_flag = netxen_get_dump_flag,
+ .get_dump_data = netxen_get_dump_data,
+ .set_dump = netxen_set_dump,
};
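The three new ethtool_ops hooks above expose the minidump through the generic ETHTOOL_SET_DUMP / ETHTOOL_GET_DUMP_FLAG / ETHTOOL_GET_DUMP_DATA commands (the path `ethtool -W` / `ethtool -w` use). A rough user-space sketch of retrieving a completed dump follows; it is not part of the patch, the interface name is a placeholder, and error handling is kept minimal.

/* Sketch: query dump length via ETHTOOL_GET_DUMP_FLAG, then fetch the
 * captured data with ETHTOOL_GET_DUMP_DATA ("eth0" is a placeholder).
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_dump flag = { .cmd = ETHTOOL_GET_DUMP_FLAG };
	struct ethtool_dump *data;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	ifr.ifr_data = (void *)&flag;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GET_DUMP_FLAG");
		goto out;
	}
	printf("dump flag 0x%x, length %u bytes\n", flag.flag, flag.len);

	data = calloc(1, sizeof(*data) + flag.len);
	if (!data)
		goto out;
	data->cmd = ETHTOOL_GET_DUMP_DATA;
	data->len = flag.len;
	ifr.ifr_data = (void *)data;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_GET_DUMP_DATA");
	/* data->data now holds the template header plus captured dump */
	free(data);
out:
	close(fd);
	return 0;
}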
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index dc1967c1f312..b1a897cd9a8d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -969,6 +969,7 @@ enum {
#define NX_RCODE_FATAL_ERROR 0x80000000
#define NX_FWERROR_PEGNUM(code) ((code) & 0xff)
#define NX_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
+#define NX_FWERROR_PEGSTAT1(code) ((code >> 8) & 0x1fffff)
#define FW_POLL_DELAY (2 * HZ)
#define FW_FAIL_THRESH 3
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 3f89e57cae50..de96a948bb7f 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -46,7 +46,6 @@ static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
void __iomem *addr, u32 data);
static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
void __iomem *addr);
-
#ifndef readq
static inline u64 readq(void __iomem *addr)
{
@@ -910,7 +909,7 @@ int netxen_config_rss(struct netxen_adapter *adapter, int enable)
return rv;
}
-int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd)
+int netxen_config_ipaddr(struct netxen_adapter *adapter, __be32 ip, int cmd)
{
nx_nic_req_t req;
u64 word;
@@ -923,7 +922,7 @@ int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd)
req.req_hdr = cpu_to_le64(word);
req.words[0] = cpu_to_le64(cmd);
- req.words[1] = cpu_to_le64(ip);
+ memcpy(&req.words[1], &ip, sizeof(u32));
rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0) {
@@ -1051,7 +1050,7 @@ int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac)
if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1)
return -1;
- if (*mac == cpu_to_le64(~0ULL)) {
+ if (*mac == ~0ULL) {
offset = NX_OLD_MAC_ADDR_OFFSET +
(adapter->portnum * sizeof(u64));
@@ -1060,7 +1059,7 @@ int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac)
offset, sizeof(u64), pmac) == -1)
return -1;
- if (*mac == cpu_to_le64(~0ULL))
+ if (*mac == ~0ULL)
return -1;
}
return 0;
@@ -1974,3 +1973,631 @@ netxen_nic_wol_supported(struct netxen_adapter *adapter)
return 0;
}
+
+static u32 netxen_md_cntrl(struct netxen_adapter *adapter,
+ struct netxen_minidump_template_hdr *template_hdr,
+ struct netxen_minidump_entry_crb *crtEntry)
+{
+ int loop_cnt, i, rv = 0, timeout_flag;
+ u32 op_count, stride;
+ u32 opcode, read_value, addr;
+ unsigned long timeout, timeout_jiffies;
+ addr = crtEntry->addr;
+ op_count = crtEntry->op_count;
+ stride = crtEntry->addr_stride;
+
+ for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
+ for (i = 0; i < sizeof(crtEntry->opcode) * 8; i++) {
+ opcode = (crtEntry->opcode & (0x1 << i));
+ if (opcode) {
+ switch (opcode) {
+ case NX_DUMP_WCRB:
+ NX_WR_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ crtEntry->value_1);
+ break;
+ case NX_DUMP_RWCRB:
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ NX_WR_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ read_value);
+ break;
+ case NX_DUMP_ANDCRB:
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ read_value &= crtEntry->value_2;
+ NX_WR_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ read_value);
+ break;
+ case NX_DUMP_ORCRB:
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ read_value |= crtEntry->value_3;
+ NX_WR_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ read_value);
+ break;
+ case NX_DUMP_POLLCRB:
+ timeout = crtEntry->poll_timeout;
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ timeout_jiffies =
+ msecs_to_jiffies(timeout) + jiffies;
+ for (timeout_flag = 0;
+ !timeout_flag
+ && ((read_value & crtEntry->value_2)
+ != crtEntry->value_1);) {
+ if (time_after(jiffies,
+ timeout_jiffies))
+ timeout_flag = 1;
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ }
+
+ if (timeout_flag) {
+ dev_err(&adapter->pdev->dev, "%s : "
+ "Timeout in poll_crb control operation.\n"
+ , __func__);
+ return -1;
+ }
+ break;
+ case NX_DUMP_RD_SAVE:
+ /* Decide which address to use */
+ if (crtEntry->state_index_a)
+ addr =
+ template_hdr->saved_state_array
+ [crtEntry->state_index_a];
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ template_hdr->saved_state_array
+ [crtEntry->state_index_v]
+ = read_value;
+ break;
+ case NX_DUMP_WRT_SAVED:
+ /* Decide which value to use */
+ if (crtEntry->state_index_v)
+ read_value =
+ template_hdr->saved_state_array
+ [crtEntry->state_index_v];
+ else
+ read_value = crtEntry->value_1;
+
+ /* Decide which address to use */
+ if (crtEntry->state_index_a)
+ addr =
+ template_hdr->saved_state_array
+ [crtEntry->state_index_a];
+
+ NX_WR_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ read_value);
+ break;
+ case NX_DUMP_MOD_SAVE_ST:
+ read_value =
+ template_hdr->saved_state_array
+ [crtEntry->state_index_v];
+ read_value <<= crtEntry->shl;
+ read_value >>= crtEntry->shr;
+ if (crtEntry->value_2)
+ read_value &=
+ crtEntry->value_2;
+ read_value |= crtEntry->value_3;
+ read_value += crtEntry->value_1;
+ /* Write value back to state area.*/
+ template_hdr->saved_state_array
+ [crtEntry->state_index_v]
+ = read_value;
+ break;
+ default:
+ rv = 1;
+ break;
+ }
+ }
+ }
+ addr = addr + stride;
+ }
+ return rv;
+}
+
+/* Read memory or MN */
+static u32
+netxen_md_rdmem(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_rdmem
+ *memEntry, u64 *data_buff)
+{
+ u64 addr, value = 0;
+ int i = 0, loop_cnt;
+
+ addr = (u64)memEntry->read_addr;
+ loop_cnt = memEntry->read_data_size; /* This is size in bytes */
+ loop_cnt /= sizeof(value);
+
+ for (i = 0; i < loop_cnt; i++) {
+ if (netxen_nic_pci_mem_read_2M(adapter, addr, &value))
+ goto out;
+ *data_buff++ = value;
+ addr += sizeof(value);
+ }
+out:
+ return i * sizeof(value);
+}
+
+/* Read CRB operation */
+static u32 netxen_md_rd_crb(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_crb
+ *crbEntry, u32 *data_buff)
+{
+ int loop_cnt;
+ u32 op_count, addr, stride, value;
+
+ addr = crbEntry->addr;
+ op_count = crbEntry->op_count;
+ stride = crbEntry->addr_stride;
+
+ for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
+ NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &value);
+ *data_buff++ = addr;
+ *data_buff++ = value;
+ addr = addr + stride;
+ }
+ return loop_cnt * (2 * sizeof(u32));
+}
+
+/* Read ROM */
+static u32
+netxen_md_rdrom(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_rdrom
+ *romEntry, __le32 *data_buff)
+{
+ int i, count = 0;
+ u32 size, lck_val;
+ u32 val;
+ u32 fl_addr, waddr, raddr;
+ fl_addr = romEntry->read_addr;
+ size = romEntry->read_data_size/4;
+lock_try:
+ lck_val = readl((void __iomem *)(adapter->ahw.pci_base0 +
+ NX_FLASH_SEM2_LK));
+ if (!lck_val && count < MAX_CTL_CHECK) {
+ msleep(20);
+ count++;
+ goto lock_try;
+ }
+ writel(adapter->ahw.pci_func, (void __iomem *)(adapter->ahw.pci_base0 +
+ NX_FLASH_LOCK_ID));
+ for (i = 0; i < size; i++) {
+ waddr = fl_addr & 0xFFFF0000;
+ NX_WR_DUMP_REG(FLASH_ROM_WINDOW, adapter->ahw.pci_base0, waddr);
+ raddr = FLASH_ROM_DATA + (fl_addr & 0x0000FFFF);
+ NX_RD_DUMP_REG(raddr, adapter->ahw.pci_base0, &val);
+ *data_buff++ = cpu_to_le32(val);
+ fl_addr += sizeof(val);
+ }
+ readl((void __iomem *)(adapter->ahw.pci_base0 + NX_FLASH_SEM2_ULK));
+ return romEntry->read_data_size;
+}
+
+/* Handle L2 Cache */
+static u32
+netxen_md_L2Cache(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_cache
+ *cacheEntry, u32 *data_buff)
+{
+ int loop_cnt, i, k, timeout_flag = 0;
+ u32 addr, read_addr, read_value, cntrl_addr, tag_reg_addr;
+ u32 tag_value, read_cnt;
+ u8 cntl_value_w, cntl_value_r;
+ unsigned long timeout, timeout_jiffies;
+
+ loop_cnt = cacheEntry->op_count;
+ read_addr = cacheEntry->read_addr;
+ cntrl_addr = cacheEntry->control_addr;
+ cntl_value_w = (u32) cacheEntry->write_value;
+ tag_reg_addr = cacheEntry->tag_reg_addr;
+ tag_value = cacheEntry->init_tag_value;
+ read_cnt = cacheEntry->read_addr_cnt;
+
+ for (i = 0; i < loop_cnt; i++) {
+ NX_WR_DUMP_REG(tag_reg_addr, adapter->ahw.pci_base0, tag_value);
+ if (cntl_value_w)
+ NX_WR_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0,
+ (u32)cntl_value_w);
+ if (cacheEntry->poll_mask) {
+ timeout = cacheEntry->poll_wait;
+ NX_RD_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0,
+ &cntl_value_r);
+ timeout_jiffies = msecs_to_jiffies(timeout) + jiffies;
+ for (timeout_flag = 0; !timeout_flag &&
+ ((cntl_value_r & cacheEntry->poll_mask) != 0);) {
+ if (time_after(jiffies, timeout_jiffies))
+ timeout_flag = 1;
+ NX_RD_DUMP_REG(cntrl_addr,
+ adapter->ahw.pci_base0,
+ &cntl_value_r);
+ }
+ if (timeout_flag) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout in processing L2 Tag poll.\n");
+ return -1;
+ }
+ }
+ addr = read_addr;
+ for (k = 0; k < read_cnt; k++) {
+ NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0,
+ &read_value);
+ *data_buff++ = read_value;
+ addr += cacheEntry->read_addr_stride;
+ }
+ tag_value += cacheEntry->tag_value_stride;
+ }
+ return read_cnt * loop_cnt * sizeof(read_value);
+}
+
+
+/* Handle L1 Cache */
+static u32 netxen_md_L1Cache(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_cache
+ *cacheEntry, u32 *data_buff)
+{
+ int i, k, loop_cnt;
+ u32 addr, read_addr, read_value, cntrl_addr, tag_reg_addr;
+ u32 tag_value, read_cnt;
+ u8 cntl_value_w;
+
+ loop_cnt = cacheEntry->op_count;
+ read_addr = cacheEntry->read_addr;
+ cntrl_addr = cacheEntry->control_addr;
+ cntl_value_w = (u32) cacheEntry->write_value;
+ tag_reg_addr = cacheEntry->tag_reg_addr;
+ tag_value = cacheEntry->init_tag_value;
+ read_cnt = cacheEntry->read_addr_cnt;
+
+ for (i = 0; i < loop_cnt; i++) {
+ NX_WR_DUMP_REG(tag_reg_addr, adapter->ahw.pci_base0, tag_value);
+ NX_WR_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0,
+ (u32) cntl_value_w);
+ addr = read_addr;
+ for (k = 0; k < read_cnt; k++) {
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ *data_buff++ = read_value;
+ addr += cacheEntry->read_addr_stride;
+ }
+ tag_value += cacheEntry->tag_value_stride;
+ }
+ return read_cnt * loop_cnt * sizeof(read_value);
+}
+
+/* Reading OCM memory */
+static u32
+netxen_md_rdocm(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_rdocm
+ *ocmEntry, u32 *data_buff)
+{
+ int i, loop_cnt;
+ u32 value;
+ void __iomem *addr;
+ addr = (ocmEntry->read_addr + adapter->ahw.pci_base0);
+ loop_cnt = ocmEntry->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ value = readl(addr);
+ *data_buff++ = value;
+ addr += ocmEntry->read_addr_stride;
+ }
+ return i * sizeof(u32);
+}
+
+/* Read MUX data */
+static u32
+netxen_md_rdmux(struct netxen_adapter *adapter, struct netxen_minidump_entry_mux
+ *muxEntry, u32 *data_buff)
+{
+ int loop_cnt = 0;
+ u32 read_addr, read_value, select_addr, sel_value;
+
+ read_addr = muxEntry->read_addr;
+ sel_value = muxEntry->select_value;
+ select_addr = muxEntry->select_addr;
+
+ for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
+ NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, sel_value);
+ NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0, &read_value);
+ *data_buff++ = sel_value;
+ *data_buff++ = read_value;
+ sel_value += muxEntry->select_value_stride;
+ }
+ return loop_cnt * (2 * sizeof(u32));
+}
+
+/* Handling Queue State Reads */
+static u32
+netxen_md_rdqueue(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_queue
+ *queueEntry, u32 *data_buff)
+{
+ int loop_cnt, k;
+ u32 queue_id, read_addr, read_value, read_stride, select_addr, read_cnt;
+
+ read_cnt = queueEntry->read_addr_cnt;
+ read_stride = queueEntry->read_addr_stride;
+ select_addr = queueEntry->select_addr;
+
+ for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
+ loop_cnt++) {
+ NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, queue_id);
+ read_addr = queueEntry->read_addr;
+ for (k = 0; k < read_cnt; k++) {
+ NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0,
+ &read_value);
+ *data_buff++ = read_value;
+ read_addr += read_stride;
+ }
+ queue_id += queueEntry->queue_id_stride;
+ }
+ return loop_cnt * (read_cnt * sizeof(read_value));
+}
+
+
+/*
+* We catch an error where driver does not read
+* as much data as we expect from the entry.
+*/
+
+static int netxen_md_entry_err_chk(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry *entry, int esize)
+{
+ if (esize < 0) {
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ return esize;
+ }
+ if (esize != entry->hdr.entry_capture_size) {
+ dev_info(&adapter->pdev->dev,
+ "Invalidate dump, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
+ entry->hdr.entry_type, entry->hdr.entry_capture_mask,
+ esize, entry->hdr.entry_capture_size);
+ dev_info(&adapter->pdev->dev, "Aborting further dump capture\n");
+ entry->hdr.entry_capture_size = esize;
+ entry->hdr.driver_flags |= NX_DUMP_SIZE_ERR;
+ }
+ return 0;
+}
+
+static int netxen_parse_md_template(struct netxen_adapter *adapter)
+{
+ int num_of_entries, buff_level, e_cnt, esize;
+ int end_cnt = 0, rv = 0, sane_start = 0, sane_end = 0;
+ char *dbuff;
+ void *template_buff = adapter->mdump.md_template;
+ char *dump_buff = adapter->mdump.md_capture_buff;
+ int capture_mask = adapter->mdump.md_capture_mask;
+ struct netxen_minidump_template_hdr *template_hdr;
+ struct netxen_minidump_entry *entry;
+
+ if ((capture_mask & 0x3) != 0x3) {
+ dev_err(&adapter->pdev->dev, "Capture mask %02x below minimum needed "
+ "for valid firmware dump\n", capture_mask);
+ return -EINVAL;
+ }
+ template_hdr = (struct netxen_minidump_template_hdr *) template_buff;
+ num_of_entries = template_hdr->num_of_entries;
+ entry = (struct netxen_minidump_entry *) ((char *) template_buff +
+ template_hdr->first_entry_offset);
+ memcpy(dump_buff, template_buff, adapter->mdump.md_template_size);
+ dump_buff = dump_buff + adapter->mdump.md_template_size;
+
+ if (template_hdr->entry_type == TLHDR)
+ sane_start = 1;
+
+ for (e_cnt = 0, buff_level = 0; e_cnt < num_of_entries; e_cnt++) {
+ if (!(entry->hdr.entry_capture_mask & capture_mask)) {
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ entry = (struct netxen_minidump_entry *)
+ ((char *) entry + entry->hdr.entry_size);
+ continue;
+ }
+ switch (entry->hdr.entry_type) {
+ case RDNOP:
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ break;
+ case RDEND:
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ if (!sane_end)
+ end_cnt = e_cnt;
+ sane_end += 1;
+ break;
+ case CNTRL:
+ rv = netxen_md_cntrl(adapter,
+ template_hdr, (void *)entry);
+ if (rv)
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ break;
+ case RDCRB:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rd_crb(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case RDMN:
+ case RDMEM:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rdmem(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case BOARD:
+ case RDROM:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rdrom(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case L2ITG:
+ case L2DTG:
+ case L2DAT:
+ case L2INS:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_L2Cache(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case L1DAT:
+ case L1INS:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_L1Cache(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case RDOCM:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rdocm(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case RDMUX:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rdmux(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case QUEUE:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rdqueue(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ default:
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ break;
+ }
+ /* Next entry in the template */
+ entry = (struct netxen_minidump_entry *)
+ ((char *) entry + entry->hdr.entry_size);
+ }
+ if (!sane_start || sane_end > 1) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware minidump template configuration error.\n");
+ }
+ return 0;
+}
+
+static int
+netxen_collect_minidump(struct netxen_adapter *adapter)
+{
+ int ret = 0;
+ struct netxen_minidump_template_hdr *hdr;
+ struct timespec val;
+ hdr = (struct netxen_minidump_template_hdr *)
+ adapter->mdump.md_template;
+ hdr->driver_capture_mask = adapter->mdump.md_capture_mask;
+ jiffies_to_timespec(jiffies, &val);
+ hdr->driver_timestamp = (u32) val.tv_sec;
+ hdr->driver_info_word2 = adapter->fw_version;
+ hdr->driver_info_word3 = NXRD32(adapter, CRB_DRIVER_VERSION);
+ ret = netxen_parse_md_template(adapter);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+
+void
+netxen_dump_fw(struct netxen_adapter *adapter)
+{
+ struct netxen_minidump_template_hdr *hdr;
+ int i, k, data_size = 0;
+ u32 capture_mask;
+ hdr = (struct netxen_minidump_template_hdr *)
+ adapter->mdump.md_template;
+ capture_mask = adapter->mdump.md_capture_mask;
+
+ for (i = 0x2, k = 1; (i & NX_DUMP_MASK_MAX); i <<= 1, k++) {
+ if (i & capture_mask)
+ data_size += hdr->capture_size_array[k];
+ }
+ if (!data_size) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid cap sizes for capture_mask=0x%x\n",
+ adapter->mdump.md_capture_mask);
+ return;
+ }
+ adapter->mdump.md_capture_size = data_size;
+ adapter->mdump.md_dump_size = adapter->mdump.md_template_size +
+ adapter->mdump.md_capture_size;
+ if (!adapter->mdump.md_capture_buff) {
+ adapter->mdump.md_capture_buff =
+ vmalloc(adapter->mdump.md_dump_size);
+ if (!adapter->mdump.md_capture_buff) {
+ dev_info(&adapter->pdev->dev,
+ "Unable to allocate memory for minidump "
+ "capture_buffer(%d bytes).\n",
+ adapter->mdump.md_dump_size);
+ return;
+ }
+ memset(adapter->mdump.md_capture_buff, 0,
+ adapter->mdump.md_dump_size);
+ if (netxen_collect_minidump(adapter)) {
+ adapter->mdump.has_valid_dump = 0;
+ adapter->mdump.md_dump_size = 0;
+ vfree(adapter->mdump.md_capture_buff);
+ adapter->mdump.md_capture_buff = NULL;
+ dev_err(&adapter->pdev->dev,
+ "Error in collecting firmware minidump.\n");
+ } else {
+ adapter->mdump.md_timestamp = jiffies;
+ adapter->mdump.has_valid_dump = 1;
+ adapter->fw_mdump_rdy = 1;
+ dev_info(&adapter->pdev->dev, "%s Successfully "
+ "collected fw dump.\n", adapter->netdev->name);
+ }
+
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "Cannot overwrite previously collected "
+ "firmware minidump.\n");
+ adapter->fw_mdump_rdy = 1;
+ return;
+ }
+}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index a8259cc19a63..718b27440351 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -280,13 +280,10 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
}
rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
- if (rds_ring->rx_buf_arr == NULL) {
- printk(KERN_ERR "%s: Failed to allocate "
- "rx buffer ring %d\n",
- netdev->name, ring);
+ if (rds_ring->rx_buf_arr == NULL)
/* free whatever was already allocated */
goto err_out;
- }
+
INIT_LIST_HEAD(&rds_ring->free_list);
/*
* Now go through all of them, set reference handles
@@ -449,7 +446,7 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter)
/* resetall */
netxen_rom_lock(adapter);
- NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xffffffff);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xfeffffff);
netxen_rom_unlock(adapter);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
@@ -480,11 +477,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter)
}
buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
- if (buf == NULL) {
- printk("%s: netxen_pinit_from_rom: Unable to calloc memory.\n",
- netxen_nic_driver_name);
+ if (buf == NULL)
return -ENOMEM;
- }
for (i = 0; i < n; i++) {
if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
@@ -1353,7 +1347,6 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
do {
val = NXRD32(adapter, CRB_CMDPEG_STATE);
-
switch (val) {
case PHAN_INITIALIZE_COMPLETE:
case PHAN_INITIALIZE_ACK:
@@ -1494,7 +1487,7 @@ netxen_alloc_rx_skb(struct netxen_adapter *adapter,
dma_addr_t dma;
struct pci_dev *pdev = adapter->pdev;
- buffer->skb = dev_alloc_skb(rds_ring->skb_size);
+ buffer->skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
if (!buffer->skb)
return 1;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 7dd9a4b107e6..65a718f9ccd3 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -82,7 +82,6 @@ static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
static void netxen_create_diag_entries(struct netxen_adapter *adapter);
static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
-
static int nx_dev_request_aer(struct netxen_adapter *adapter);
static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
static int netxen_can_start_firmware(struct netxen_adapter *adapter);
@@ -519,7 +518,7 @@ static int netxen_nic_set_mac(struct net_device *netdev, void *p)
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
if (netif_running(netdev)) {
netif_device_detach(netdev);
@@ -802,16 +801,16 @@ err_out:
static void
netxen_check_options(struct netxen_adapter *adapter)
{
- u32 fw_major, fw_minor, fw_build;
+ u32 fw_major, fw_minor, fw_build, prev_fw_version;
char brd_name[NETXEN_MAX_SHORT_NAME];
char serial_num[32];
- int i, offset, val;
- int *ptr32;
+ int i, offset, val, err;
+ __le32 *ptr32;
struct pci_dev *pdev = adapter->pdev;
adapter->driver_mismatch = 0;
- ptr32 = (int *)&serial_num;
+ ptr32 = (__le32 *)&serial_num;
offset = NX_FW_SERIAL_NUM_OFFSET;
for (i = 0; i < 8; i++) {
if (netxen_rom_fast_read(adapter, offset, &val) == -1) {
@@ -826,9 +825,22 @@ netxen_check_options(struct netxen_adapter *adapter)
fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
-
+ prev_fw_version = adapter->fw_version;
adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build);
+ /* Get FW Mini Coredump template and store it */
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (adapter->mdump.md_template == NULL ||
+ adapter->fw_version > prev_fw_version) {
+ kfree(adapter->mdump.md_template);
+ adapter->mdump.md_template = NULL;
+ err = netxen_setup_minidump(adapter);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to setup minidump rcode = %d\n", err);
+ }
+ }
+
if (adapter->portnum == 0) {
get_brd_name_by_type(adapter->ahw.board_type, brd_name);
@@ -909,7 +921,12 @@ netxen_start_firmware(struct netxen_adapter *adapter)
if (err)
return err;
- if (!netxen_can_start_firmware(adapter))
+ err = netxen_can_start_firmware(adapter);
+
+ if (err < 0)
+ return err;
+
+ if (!err)
goto wait_init;
first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
@@ -1403,7 +1420,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev = alloc_etherdev(sizeof(struct netxen_adapter));
if(!netdev) {
- dev_err(&pdev->dev, "failed to allocate net_device\n");
err = -ENOMEM;
goto err_out_free_res;
}
@@ -1529,6 +1545,18 @@ err_out_disable_pdev:
return err;
}
+static
+void netxen_cleanup_minidump(struct netxen_adapter *adapter)
+{
+ kfree(adapter->mdump.md_template);
+ adapter->mdump.md_template = NULL;
+
+ if (adapter->mdump.md_capture_buff) {
+ vfree(adapter->mdump.md_capture_buff);
+ adapter->mdump.md_capture_buff = NULL;
+ }
+}
+
static void __devexit netxen_nic_remove(struct pci_dev *pdev)
{
struct netxen_adapter *adapter;
@@ -1564,8 +1592,10 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
netxen_release_firmware(adapter);
- if (NX_IS_REVISION_P3(pdev->revision))
+ if (NX_IS_REVISION_P3(pdev->revision)) {
+ netxen_cleanup_minidump(adapter);
pci_disable_pcie_error_reporting(pdev);
+ }
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -2317,7 +2347,7 @@ nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
static int
nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
{
- int count;
+ int count, state;
if (netxen_api_lock(adapter))
return -EIO;
@@ -2325,8 +2355,9 @@ nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
WARN_ON(count == 0);
NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
- if (count == 0)
+ if (count == 0 && state != NX_DEV_FAILED)
NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);
netxen_api_unlock(adapter);
@@ -2355,7 +2386,7 @@ nx_dev_request_aer(struct netxen_adapter *adapter)
return ret;
}
-static int
+int
nx_dev_request_reset(struct netxen_adapter *adapter)
{
u32 state;
@@ -2366,10 +2397,11 @@ nx_dev_request_reset(struct netxen_adapter *adapter)
state = NXRD32(adapter, NX_CRB_DEV_STATE);
- if (state == NX_DEV_NEED_RESET)
+ if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED)
ret = 0;
else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
+ adapter->flags |= NETXEN_FW_RESET_OWNER;
ret = 0;
}
@@ -2384,8 +2416,10 @@ netxen_can_start_firmware(struct netxen_adapter *adapter)
int count;
int can_start = 0;
- if (netxen_api_lock(adapter))
- return 0;
+ if (netxen_api_lock(adapter)) {
+ nx_incr_dev_ref_cnt(adapter);
+ return -1;
+ }
count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
@@ -2457,8 +2491,31 @@ netxen_fwinit_work(struct work_struct *work)
struct netxen_adapter *adapter = container_of(work,
struct netxen_adapter, fw_work.work);
int dev_state;
-
+ int count;
dev_state = NXRD32(adapter, NX_CRB_DEV_STATE);
+ if (adapter->flags & NETXEN_FW_RESET_OWNER) {
+ count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ WARN_ON(count == 0);
+ if (count == 1) {
+ if (adapter->mdump.md_enabled) {
+ rtnl_lock();
+ netxen_dump_fw(adapter);
+ rtnl_unlock();
+ }
+ adapter->flags &= ~NETXEN_FW_RESET_OWNER;
+ if (netxen_api_lock(adapter)) {
+ clear_bit(__NX_RESETTING, &adapter->state);
+ NXWR32(adapter, NX_CRB_DEV_STATE,
+ NX_DEV_FAILED);
+ return;
+ }
+ count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);
+ dev_state = NX_DEV_COLD;
+ netxen_api_unlock(adapter);
+ }
+ }
switch (dev_state) {
case NX_DEV_COLD:
@@ -2471,11 +2528,9 @@ netxen_fwinit_work(struct work_struct *work)
case NX_DEV_NEED_RESET:
case NX_DEV_INITALIZING:
- if (++adapter->fw_wait_cnt < FW_POLL_THRESH) {
netxen_schedule_work(adapter,
netxen_fwinit_work, 2 * FW_POLL_DELAY);
return;
- }
case NX_DEV_FAILED:
default:
@@ -2483,6 +2538,15 @@ netxen_fwinit_work(struct work_struct *work)
break;
}
+ if (netxen_api_lock(adapter)) {
+ clear_bit(__NX_RESETTING, &adapter->state);
+ return;
+ }
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_FAILED);
+ netxen_api_unlock(adapter);
+ dev_err(&adapter->pdev->dev, "%s: Device initialization Failed\n",
+ adapter->netdev->name);
+
clear_bit(__NX_RESETTING, &adapter->state);
}
@@ -2492,7 +2556,7 @@ netxen_detach_work(struct work_struct *work)
struct netxen_adapter *adapter = container_of(work,
struct netxen_adapter, fw_work.work);
struct net_device *netdev = adapter->netdev;
- int ref_cnt, delay;
+ int ref_cnt = 0, delay;
u32 status;
netif_device_detach(netdev);
@@ -2511,7 +2575,8 @@ netxen_detach_work(struct work_struct *work)
if (adapter->temp == NX_TEMP_PANIC)
goto err_ret;
- ref_cnt = nx_decr_dev_ref_cnt(adapter);
+ if (!(adapter->flags & NETXEN_FW_RESET_OWNER))
+ ref_cnt = nx_decr_dev_ref_cnt(adapter);
if (ref_cnt == -EIO)
goto err_ret;
@@ -2531,6 +2596,7 @@ static int
netxen_check_health(struct netxen_adapter *adapter)
{
u32 state, heartbit;
+ u32 peg_status;
struct net_device *netdev = adapter->netdev;
state = NXRD32(adapter, NX_CRB_DEV_STATE);
@@ -2551,7 +2617,7 @@ netxen_check_health(struct netxen_adapter *adapter)
	 * Send a request to destroy the context only in case of tx timeout;
	 * it is not required in case of FW hang
*/
- if (state == NX_DEV_NEED_RESET) {
+ if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED) {
adapter->need_fw_reset = 1;
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
goto detach;
@@ -2577,8 +2643,24 @@ netxen_check_health(struct netxen_adapter *adapter)
clear_bit(__NX_FW_ATTACHED, &adapter->state);
- dev_info(&netdev->dev, "firmware hang detected\n");
-
+ dev_err(&netdev->dev, "firmware hang detected\n");
+ peg_status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
+ dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
+ "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+ "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+ "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+ "PEG_NET_4_PC: 0x%x\n",
+ peg_status,
+ NXRD32(adapter, NETXEN_PEG_HALT_STATUS2),
+ NXRD32(adapter, NETXEN_CRB_PEG_NET_0 + 0x3c),
+ NXRD32(adapter, NETXEN_CRB_PEG_NET_1 + 0x3c),
+ NXRD32(adapter, NETXEN_CRB_PEG_NET_2 + 0x3c),
+ NXRD32(adapter, NETXEN_CRB_PEG_NET_3 + 0x3c),
+ NXRD32(adapter, NETXEN_CRB_PEG_NET_4 + 0x3c));
+ if (NX_FWERROR_PEGSTAT1(peg_status) == 0x67)
+ dev_err(&adapter->pdev->dev,
+ "Firmware aborted with error code 0x00006700. "
+ "Device is being reset.\n");
detach:
if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
!test_and_set_bit(__NX_RESETTING, &adapter->state))
@@ -2848,13 +2930,12 @@ static struct bin_attribute bin_attr_mem = {
static void
netxen_create_sysfs_entries(struct netxen_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- struct device *dev = &netdev->dev;
+ struct device *dev = &adapter->pdev->dev;
if (adapter->capabilities & NX_FW_CAPABILITY_BDG) {
/* bridged_mode control */
if (device_create_file(dev, &dev_attr_bridged_mode)) {
- dev_warn(&netdev->dev,
+ dev_warn(dev,
"failed to create bridged_mode sysfs entry\n");
}
}
@@ -2863,8 +2944,7 @@ netxen_create_sysfs_entries(struct netxen_adapter *adapter)
static void
netxen_remove_sysfs_entries(struct netxen_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- struct device *dev = &netdev->dev;
+ struct device *dev = &adapter->pdev->dev;
if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
device_remove_file(dev, &dev_attr_bridged_mode);
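
The sysfs hunks above anchor the driver attributes on the PCI device instead of the net_device, so they stay tied to the PCI function rather than to the renameable network interface. A minimal sketch of creating such an attribute on &pdev->dev; the "bridged_mode" show handler here is a placeholder, not the driver's real implementation:

/* Illustrative sketch; attribute contents are placeholders. */
static ssize_t bridged_mode_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 0);
}
static DEVICE_ATTR(bridged_mode, 0444, bridged_mode_show, NULL);

static void create_diag_entries(struct pci_dev *pdev)
{
        if (device_create_file(&pdev->dev, &dev_attr_bridged_mode))
                dev_warn(&pdev->dev,
                         "failed to create bridged_mode sysfs entry\n");
}

static void remove_diag_entries(struct pci_dev *pdev)
{
        device_remove_file(&pdev->dev, &dev_attr_bridged_mode);
}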
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index e61560e16385..df09b1cb742f 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2836,7 +2836,7 @@ static int ql_create_send_free_list(struct ql3_adapter *qdev)
req_q_curr++;
tx_cb->oal = kmalloc(512, GFP_KERNEL);
if (tx_cb->oal == NULL)
- return -1;
+ return -ENOMEM;
}
return 0;
}
@@ -3804,7 +3804,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
ndev = alloc_etherdev(sizeof(struct ql3_adapter));
if (!ndev) {
- pr_err("%s could not alloc etherdev\n", pci_name(pdev));
err = -ENOMEM;
goto err_out_free_regions;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 60976fc4ccc6..2b5af22419a5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,7 +37,7 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
#define _QLCNIC_LINUX_SUBVERSION 25
-#define QLCNIC_LINUX_VERSIONID "5.0.25"
+#define QLCNIC_LINUX_VERSIONID "5.0.26"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index cc228cf3d84b..89ddf7f7d7df 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -155,7 +155,6 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int check_sfp_module = 0;
- u16 pcifn = adapter->ahw->pci_func;
/* read which mode */
if (adapter->ahw->port_type == QLCNIC_GBE) {
@@ -194,10 +193,8 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
goto skip;
}
- val = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
- ethtool_cmd_speed_set(ecmd, P3P_LINK_SPEED_MHZ *
- P3P_LINK_SPEED_VAL(pcifn, val));
- ecmd->duplex = DUPLEX_FULL;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
ecmd->autoneg = AUTONEG_DISABLE;
} else
return -EIO;
@@ -722,7 +719,7 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
int i, loop, cnt = 0;
for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
- skb = dev_alloc_skb(QLCNIC_ILB_PKT_SIZE);
+ skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
@@ -1155,7 +1152,6 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
if (!fw_dump->clr) {
netdev_info(netdev, "Dump not available\n");
- qlcnic_api_unlock(adapter);
return -EINVAL;
}
/* Copy template header first */
@@ -1174,7 +1170,7 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
vfree(fw_dump->data);
fw_dump->data = NULL;
fw_dump->clr = 0;
-
+ netdev_info(netdev, "extracted the FW dump Successfully\n");
return 0;
}
@@ -1192,7 +1188,7 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
return ret;
}
if (fw_dump->clr) {
- dev_info(&adapter->pdev->dev,
+ netdev_info(netdev,
"Previous dump not cleared, not forcing dump\n");
return ret;
}
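
Rather than reporting a possibly stale register value, the ethtool path above now returns the dedicated "unknown" markers when the 10GbE link speed cannot be read back. A minimal sketch of that reporting, assuming a driver-private link-up flag:

/* Illustrative sketch of filling ethtool_cmd when speed is unknown. */
static void report_link_settings(struct ethtool_cmd *ecmd, bool link_up,
                                 u32 speed_mbps)
{
        if (link_up) {
                ethtool_cmd_speed_set(ecmd, speed_mbps);
                ecmd->duplex = DUPLEX_FULL;
        } else {
                ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);     /* -1 */
                ecmd->duplex = DUPLEX_UNKNOWN;                  /* 0xff */
        }
        ecmd->autoneg = AUTONEG_DISABLE;
}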
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 38669583840c..d32cf0ddf1b9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -1369,7 +1369,13 @@ qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
adapter->module_type = module;
adapter->link_autoneg = autoneg;
- adapter->link_speed = link_speed;
+
+ if (link_status) {
+ adapter->link_speed = link_speed;
+ } else {
+ adapter->link_speed = SPEED_UNKNOWN;
+ adapter->link_duplex = DUPLEX_UNKNOWN;
+ }
}
static void
@@ -1434,7 +1440,7 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
dma_addr_t dma;
struct pci_dev *pdev = adapter->pdev;
- skb = dev_alloc_skb(rds_ring->skb_size);
+ skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
if (!skb) {
adapter->stats.skb_alloc_failure++;
return -ENOMEM;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 69b8e4ef14d9..81bb1a69e69f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -301,7 +301,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
return -EOPNOTSUPP;
if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
netif_device_detach(netdev);
@@ -1576,7 +1576,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
if (!netdev) {
- dev_err(&pdev->dev, "failed to allocate net_device\n");
err = -ENOMEM;
goto err_out_free_res;
}
@@ -3000,8 +2999,18 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
void
qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
{
- u32 state;
-
+ u32 state, xg_val = 0, gb_val = 0;
+
+ qlcnic_xg_set_xg0_mask(xg_val);
+ qlcnic_xg_set_xg1_mask(xg_val);
+ QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, xg_val);
+ qlcnic_gb_set_gb0_mask(gb_val);
+ qlcnic_gb_set_gb1_mask(gb_val);
+ qlcnic_gb_set_gb2_mask(gb_val);
+ qlcnic_gb_set_gb3_mask(gb_val);
+ QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, gb_val);
+ dev_info(&adapter->pdev->dev, "Pause control frames disabled"
+ " on all ports\n");
adapter->need_fw_reset = 1;
if (qlcnic_api_lock(adapter))
return;
@@ -3150,7 +3159,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
peg_status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
- if (LSW(MSB(peg_status)) == 0x67)
+ if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
dev_err(&adapter->pdev->dev,
"Firmware aborted with error code 0x00006700. "
"Device is being reset.\n");
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index b8478aab050e..5a639df33f18 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.29.00.00-01"
+#define DRV_VERSION "v1.00.00.30.00.00-01"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index fca804f36d61..58185b604b72 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -1824,10 +1824,8 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
pr_err("%s: Enter\n", __func__);
ptr = kmalloc(size, GFP_ATOMIC);
- if (ptr == NULL) {
- pr_err("%s: Couldn't allocate a buffer\n", __func__);
+ if (ptr == NULL)
return;
- }
if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
pr_err("%s: Failed to upload control block!\n", __func__);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b54898737284..49343ec21c82 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -375,13 +375,6 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
u32 lower =
(addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
(addr[5]);
-
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Adding %s address %pM at index %d in the CAM.\n",
- type == MAC_ADDR_TYPE_MULTI_MAC ?
- "MULTICAST" : "UNICAST",
- addr, index);
-
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
@@ -430,12 +423,6 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
* addressing. It's either MAC_ADDR_E on or off.
* That's bit-27 we're talking about.
*/
- netif_info(qdev, ifup, qdev->ndev,
- "%s VLAN ID %d %s the CAM.\n",
- enable_bit ? "Adding" : "Removing",
- index,
- enable_bit ? "to" : "from");
-
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
@@ -535,28 +522,6 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
int status = -EINVAL; /* Return error if no mask match. */
u32 value = 0;
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "%s %s mask %s the routing reg.\n",
- enable ? "Adding" : "Removing",
- index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
- index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
- index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
- index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
- index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
- index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
- index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
- index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
- index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
- index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
- index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
- index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
- index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
- index == RT_IDX_UNUSED013 ? "UNUSED13" :
- index == RT_IDX_UNUSED014 ? "UNUSED14" :
- index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
- "(Bad index != RT_IDX)",
- enable ? "to" : "from");
-
switch (mask) {
case RT_IDX_CAM_HIT:
{
@@ -1178,14 +1143,16 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
int i;
while (rx_ring->lbq_free_cnt > 32) {
- for (i = 0; i < 16; i++) {
+ for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"lbq: try cleaning clean_idx = %d.\n",
clean_idx);
lbq_desc = &rx_ring->lbq[clean_idx];
if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
+ rx_ring->lbq_clean_idx = clean_idx;
netif_err(qdev, ifup, qdev->ndev,
- "Could not get a page chunk.\n");
+ "Could not get a page chunk, i=%d, clean_idx =%d .\n",
+ i, clean_idx);
return;
}
@@ -1230,7 +1197,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
int i;
while (rx_ring->sbq_free_cnt > 16) {
- for (i = 0; i < 16; i++) {
+ for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
sbq_desc = &rx_ring->sbq[clean_idx];
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"sbq: try cleaning clean_idx = %d.\n",
@@ -1576,13 +1543,14 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
- struct iphdr *iph = (struct iphdr *) skb->data;
+ struct iphdr *iph =
+ (struct iphdr *) ((u8 *)addr + ETH_HLEN);
if (!(iph->frag_off &
cpu_to_be16(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
- "TCP checksum done!\n");
+ "UDP checksum done!\n");
}
}
}
@@ -1690,7 +1658,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
- "TCP checksum done!\n");
+ "UDP checksum done!\n");
}
}
}
@@ -2312,13 +2280,9 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
struct ql_adapter *qdev = netdev_priv(ndev);
if (features & NETIF_F_HW_VLAN_RX) {
- netif_printk(qdev, ifup, KERN_DEBUG, ndev,
- "Turning on VLAN in NIC_RCV_CFG.\n");
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
NIC_RCV_CFG_VLAN_MATCH_AND_NON);
} else {
- netif_printk(qdev, ifup, KERN_DEBUG, ndev,
- "Turning off VLAN in NIC_RCV_CFG.\n");
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
}
}
@@ -3183,8 +3147,6 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"Invalid rx_ring->type = %d.\n", rx_ring->type);
}
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Initializing rx work queue.\n");
err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
CFG_LCQ, rx_ring->cq_id);
if (err) {
@@ -3237,8 +3199,6 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
return err;
}
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Successfully loaded WQICB.\n");
return err;
}
@@ -3488,12 +3448,8 @@ static void ql_free_irq(struct ql_adapter *qdev)
if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
free_irq(qdev->msi_x_entry[i].vector,
&qdev->rx_ring[i]);
- netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
- "freeing msix interrupt %d.\n", i);
} else {
free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
- netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
- "freeing msi interrupt %d.\n", i);
}
}
}
@@ -3522,17 +3478,6 @@ static int ql_request_irq(struct ql_adapter *qdev)
"Failed request for MSIX interrupt %d.\n",
i);
goto err_irq;
- } else {
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Hooked intr %d, queue type %s, with name %s.\n",
- i,
- qdev->rx_ring[i].type == DEFAULT_Q ?
- "DEFAULT_Q" :
- qdev->rx_ring[i].type == TX_Q ?
- "TX_Q" :
- qdev->rx_ring[i].type == RX_Q ?
- "RX_Q" : "",
- intr_context->name);
}
} else {
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
@@ -3602,15 +3547,11 @@ static int ql_start_rss(struct ql_adapter *qdev)
memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
-
status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
return status;
}
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Successfully loaded RICB.\n");
return status;
}
@@ -3817,11 +3758,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
}
/* Start NAPI for the RSS queues. */
- for (i = 0; i < qdev->rss_ring_count; i++) {
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Enabling NAPI for rx_ring[%d].\n", i);
+ for (i = 0; i < qdev->rss_ring_count; i++)
napi_enable(&qdev->rx_ring[i].napi);
- }
return status;
}
@@ -4121,10 +4059,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
rx_ring->lbq_size =
rx_ring->lbq_len * sizeof(__le64);
rx_ring->lbq_buf_size = (u16)lbq_buf_len;
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "lbq_buf_size %d, order = %d\n",
- rx_ring->lbq_buf_size,
- qdev->lbq_buf_order);
rx_ring->sbq_len = NUM_SMALL_BUFFERS;
rx_ring->sbq_size =
rx_ring->sbq_len * sizeof(__le64);
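
The lbq/sbq refill loops above now start at clean_idx % 16 rather than 0, and the failure path stores the index back, so a refill that bailed out early (for example after a failed page-chunk allocation) resumes where it stopped instead of re-walking descriptors it already filled. A minimal sketch of that resume-on-retry pattern, with hypothetical names:

/* Illustrative sketch; ring fields and alloc_one() are assumptions. */
struct refill_ring {
        unsigned int clean_idx;         /* next descriptor to (re)fill */
        unsigned int len;               /* ring size */
        unsigned int free_cnt;
};

static void refill_chunk(struct refill_ring *ring,
                         int (*alloc_one)(struct refill_ring *, unsigned int))
{
        unsigned int i;

        while (ring->free_cnt > 32) {
                /* resume inside a 16-entry chunk if a prior pass bailed out */
                for (i = ring->clean_idx % 16; i < 16; i++) {
                        if (alloc_one(ring, ring->clean_idx))
                                return; /* clean_idx keeps the resume point */
                        ring->clean_idx = (ring->clean_idx + 1) % ring->len;
                        ring->free_cnt--;
                }
        }
}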
diff --git a/drivers/net/ethernet/racal/ni5010.c b/drivers/net/ethernet/racal/ni5010.c
index 072810da9a37..807982220050 100644
--- a/drivers/net/ethernet/racal/ni5010.c
+++ b/drivers/net/ethernet/racal/ni5010.c
@@ -552,7 +552,7 @@ static void ni5010_rx(struct net_device *dev)
}
/* Malloc up new buffer. */
- skb = dev_alloc_skb(i_pkt_size + 3);
+ skb = netdev_alloc_skb(dev, i_pkt_size + 3);
if (skb == NULL) {
printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index cb0eca807852..b96e1920e045 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1107,7 +1107,6 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(struct r6040_private));
if (!dev) {
- dev_err(&pdev->dev, "Failed to allocate etherdev\n");
err = -ENOMEM;
goto err_out;
}
@@ -1152,7 +1151,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
if (!(adrp[0] || adrp[1] || adrp[2])) {
netdev_warn(dev, "MAC address not initialized, "
"generating random\n");
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
}
/* Link new device into r6040_root_dev */
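
eth_hw_addr_random(), used above in place of random_ether_addr(), both generates a random locally administered address and marks dev->addr_assign_type, so user space can tell the address is not a permanent one. A minimal sketch of the usual fallback when the EEPROM address turns out to be empty or invalid:

/* Illustrative sketch of falling back to a random MAC address. */
static void set_mac_from_eeprom(struct net_device *dev, const u8 *eeprom_addr)
{
        if (is_valid_ether_addr(eeprom_addr)) {
                memcpy(dev->dev_addr, eeprom_addr, ETH_ALEN);
        } else {
                netdev_warn(dev, "MAC address not initialized, using random\n");
                eth_hw_addr_random(dev);        /* also sets addr_assign_type */
        }
}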
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index a8779bedb3d9..df7fd8d083dc 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -565,6 +565,12 @@ struct rtl_extra_stats {
unsigned long rx_lost_in_ring;
};
+struct rtl8139_stats {
+ u64 packets;
+ u64 bytes;
+ struct u64_stats_sync syncp;
+};
+
struct rtl8139_private {
void __iomem *mmio_addr;
int drv_flags;
@@ -575,11 +581,13 @@ struct rtl8139_private {
unsigned char *rx_ring;
unsigned int cur_rx; /* RX buf index of next pkt */
+ struct rtl8139_stats rx_stats;
dma_addr_t rx_ring_dma;
unsigned int tx_flag;
unsigned long cur_tx;
unsigned long dirty_tx;
+ struct rtl8139_stats tx_stats;
unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */
unsigned char *tx_bufs; /* Tx bounce buffer region. */
dma_addr_t tx_bufs_dma;
@@ -641,7 +649,9 @@ static int rtl8139_poll(struct napi_struct *napi, int budget);
static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance);
static int rtl8139_close (struct net_device *dev);
static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
-static struct net_device_stats *rtl8139_get_stats (struct net_device *dev);
+static struct rtnl_link_stats64 *rtl8139_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64
+ *stats);
static void rtl8139_set_rx_mode (struct net_device *dev);
static void __set_rx_mode (struct net_device *dev);
static void rtl8139_hw_start (struct net_device *dev);
@@ -754,10 +764,9 @@ static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
/* dev and priv zeroed in alloc_etherdev */
dev = alloc_etherdev (sizeof (*tp));
- if (dev == NULL) {
- dev_err(&pdev->dev, "Unable to alloc new net device\n");
+ if (dev == NULL)
return ERR_PTR(-ENOMEM);
- }
+
SET_NETDEV_DEV(dev, &pdev->dev);
tp = netdev_priv(dev);
@@ -908,10 +917,37 @@ err_out:
return ERR_PTR(rc);
}
+static int rtl8139_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ unsigned long flags;
+ netdev_features_t changed = features ^ dev->features;
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (!(changed & (NETIF_F_RXALL)))
+ return 0;
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ if (changed & NETIF_F_RXALL) {
+ int rx_mode = tp->rx_config;
+ if (features & NETIF_F_RXALL)
+ rx_mode |= (AcceptErr | AcceptRunt);
+ else
+ rx_mode &= ~(AcceptErr | AcceptRunt);
+ tp->rx_config = rtl8139_rx_config | rx_mode;
+ RTL_W32_F(RxConfig, tp->rx_config);
+ }
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ return 0;
+}
+
static const struct net_device_ops rtl8139_netdev_ops = {
.ndo_open = rtl8139_open,
.ndo_stop = rtl8139_close,
- .ndo_get_stats = rtl8139_get_stats,
+ .ndo_get_stats64 = rtl8139_get_stats64,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = rtl8139_set_mac_address,
@@ -922,6 +958,7 @@ static const struct net_device_ops rtl8139_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = rtl8139_poll_controller,
#endif
+ .ndo_set_features = rtl8139_set_features,
};
static int __devinit rtl8139_init_one (struct pci_dev *pdev,
@@ -995,6 +1032,9 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
dev->vlan_features = dev->features;
+ dev->hw_features |= NETIF_F_RXALL;
+ dev->hw_features |= NETIF_F_RXFCS;
+
dev->irq = pdev->irq;
/* tp zeroed and aligned in alloc_etherdev */
@@ -1777,8 +1817,10 @@ static void rtl8139_tx_interrupt (struct net_device *dev,
dev->stats.tx_fifo_errors++;
}
dev->stats.collisions += (txstatus >> 24) & 15;
- dev->stats.tx_bytes += txstatus & 0x7ff;
- dev->stats.tx_packets++;
+ u64_stats_update_begin(&tp->tx_stats.syncp);
+ tp->tx_stats.packets++;
+ tp->tx_stats.bytes += txstatus & 0x7ff;
+ u64_stats_update_end(&tp->tx_stats.syncp);
}
dirty_tx++;
@@ -1941,7 +1983,10 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
/* read size+status of next frame from DMA ring buffer */
rx_status = le32_to_cpu (*(__le32 *) (rx_ring + ring_offset));
rx_size = rx_status >> 16;
- pkt_size = rx_size - 4;
+ if (likely(!(dev->features & NETIF_F_RXFCS)))
+ pkt_size = rx_size - 4;
+ else
+ pkt_size = rx_size;
netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n",
__func__, rx_status, rx_size, cur_rx);
@@ -1979,11 +2024,30 @@ no_early_rx:
if (unlikely((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
(rx_size < 8) ||
(!(rx_status & RxStatusOK)))) {
+ if ((dev->features & NETIF_F_RXALL) &&
+ (rx_size <= (MAX_ETH_FRAME_SIZE + 4)) &&
+ (rx_size >= 8) &&
+ (!(rx_status & RxStatusOK))) {
+ /* Length is at least mostly OK, but pkt has
+ * error. I'm hoping we can handle some of these
+ * errors without resetting the chip. --Ben
+ */
+ dev->stats.rx_errors++;
+ if (rx_status & RxCRCErr) {
+ dev->stats.rx_crc_errors++;
+ goto keep_pkt;
+ }
+ if (rx_status & RxRunt) {
+ dev->stats.rx_length_errors++;
+ goto keep_pkt;
+ }
+ }
rtl8139_rx_err (rx_status, dev, tp, ioaddr);
received = -1;
goto out;
}
+keep_pkt:
/* Malloc up new buffer, compatible with net-2e. */
/* Omit the four octet CRC from the length. */
@@ -1998,8 +2062,10 @@ no_early_rx:
skb->protocol = eth_type_trans (skb, dev);
- dev->stats.rx_bytes += pkt_size;
- dev->stats.rx_packets++;
+ u64_stats_update_begin(&tp->rx_stats.syncp);
+ tp->rx_stats.packets++;
+ tp->rx_stats.bytes += pkt_size;
+ u64_stats_update_end(&tp->rx_stats.syncp);
netif_receive_skb (skb);
} else {
@@ -2463,11 +2529,13 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
-static struct net_device_stats *rtl8139_get_stats (struct net_device *dev)
+static struct rtnl_link_stats64 *
+rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct rtl8139_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
unsigned long flags;
+ unsigned int start;
if (netif_running(dev)) {
spin_lock_irqsave (&tp->lock, flags);
@@ -2476,7 +2544,21 @@ static struct net_device_stats *rtl8139_get_stats (struct net_device *dev)
spin_unlock_irqrestore (&tp->lock, flags);
}
- return &dev->stats;
+ netdev_stats_to_stats64(stats, &dev->stats);
+
+ do {
+ start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
+ stats->rx_packets = tp->rx_stats.packets;
+ stats->rx_bytes = tp->rx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
+ stats->tx_packets = tp->tx_stats.packets;
+ stats->tx_bytes = tp->tx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
+
+ return stats;
}
/* Set or clear the multicast filter for this adaptor.
@@ -2516,6 +2598,9 @@ static void __set_rx_mode (struct net_device *dev)
}
}
+ if (dev->features & NETIF_F_RXALL)
+ rx_mode |= (AcceptErr | AcceptRunt);
+
/* We can safely update without stopping the chip. */
tmp = rtl8139_rx_config | rx_mode;
if (tp->rx_config != tmp) {
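
The 8139too conversion above keeps rx/tx byte and packet counts in per-direction u64 counters protected by u64_stats_sync, so 32-bit machines get tear-free 64-bit statistics without taking a spinlock in the hot path: the writer brackets its updates with u64_stats_update_begin/end, and ndo_get_stats64 retries its snapshot until the sequence count is stable. A minimal stand-alone sketch of that pattern, using the fetch helper names of this kernel generation:

/* Illustrative sketch of the u64_stats_sync writer/reader pattern. */
struct dir_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;
};

static void count_packet(struct dir_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        s->packets++;
        s->bytes += len;
        u64_stats_update_end(&s->syncp);
}

static void snapshot(const struct dir_stats *s, u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_bh(&s->syncp);
                *packets = s->packets;
                *bytes = s->bytes;
        } while (u64_stats_fetch_retry_bh(&s->syncp, start));
}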
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 0578859a3c73..5821966f9f28 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -24,11 +24,11 @@ config ATP
select CRC32
---help---
This is a network (Ethernet) device which attaches to your parallel
- port. Read <file:drivers/net/atp.c> as well as the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>, if you
- want to use this. If you intend to use this driver, you should have
- said N to the "Parallel printer support", because the two drivers
- don't like each other.
+ port. Read <file:drivers/net/ethernet/realtek/atp.c> as well as the
+ Ethernet-HOWTO, available from <http://www.tldp.org/docs.html#howto>,
+ if you want to use this. If you intend to use this driver, you
+ should have said N to the "Parallel printer support", because the two
+ drivers don't like each other.
To compile this driver as a module, choose M here: the module
will be called atp.
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index e3f57fdbf0ea..46c1932048cb 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -783,7 +783,7 @@ static void net_rx(struct net_device *dev)
int pkt_len = (rx_head.rx_count & 0x7ff) - 4;
struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_len + 2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n",
dev->name);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index bbacb3741ec0..27c358c8f4dc 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -255,10 +255,6 @@ enum cfg_version {
RTL_CFG_2
};
-static void rtl_hw_start_8169(struct net_device *);
-static void rtl_hw_start_8168(struct net_device *);
-static void rtl_hw_start_8101(struct net_device *);
-
static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
@@ -667,12 +663,25 @@ struct rtl8169_counters {
__le16 tx_underun;
};
+enum rtl_flag {
+ RTL_FLAG_TASK_ENABLED,
+ RTL_FLAG_TASK_SLOW_PENDING,
+ RTL_FLAG_TASK_RESET_PENDING,
+ RTL_FLAG_TASK_PHY_PENDING,
+ RTL_FLAG_MAX
+};
+
+struct rtl8169_stats {
+ u64 packets;
+ u64 bytes;
+ struct u64_stats_sync syncp;
+};
+
struct rtl8169_private {
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev;
struct net_device *dev;
struct napi_struct napi;
- spinlock_t lock;
u32 msg_enable;
u16 txd_version;
u16 mac_version;
@@ -680,6 +689,8 @@ struct rtl8169_private {
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
u32 dirty_rx;
u32 dirty_tx;
+ struct rtl8169_stats rx_stats;
+ struct rtl8169_stats tx_stats;
struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
dma_addr_t TxPhyAddr;
@@ -688,9 +699,8 @@ struct rtl8169_private {
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
struct timer_list timer;
u16 cp_cmd;
- u16 intr_event;
- u16 napi_event;
- u16 intr_mask;
+
+ u16 event_slow;
struct mdio_ops {
void (*write)(void __iomem *, int, int);
@@ -714,7 +724,13 @@ struct rtl8169_private {
unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
unsigned int (*link_ok)(void __iomem *);
int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
- struct delayed_work task;
+
+ struct {
+ DECLARE_BITMAP(flags, RTL_FLAG_MAX);
+ struct mutex mutex;
+ struct work_struct work;
+ } wk;
+
unsigned features;
struct mii_if_info mii;
@@ -754,22 +770,15 @@ MODULE_FIRMWARE(FIRMWARE_8105E_1);
MODULE_FIRMWARE(FIRMWARE_8168F_1);
MODULE_FIRMWARE(FIRMWARE_8168F_2);
-static int rtl8169_open(struct net_device *dev);
-static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
-static int rtl8169_init_ring(struct net_device *dev);
-static void rtl_hw_start(struct net_device *dev);
-static int rtl8169_close(struct net_device *dev);
-static void rtl_set_rx_mode(struct net_device *dev);
-static void rtl8169_tx_timeout(struct net_device *dev);
-static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
-static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
- void __iomem *, u32 budget);
-static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
-static void rtl8169_down(struct net_device *dev);
-static void rtl8169_rx_clear(struct rtl8169_private *tp);
-static int rtl8169_poll(struct napi_struct *napi, int budget);
+static void rtl_lock_work(struct rtl8169_private *tp)
+{
+ mutex_lock(&tp->wk.mutex);
+}
+
+static void rtl_unlock_work(struct rtl8169_private *tp)
+{
+ mutex_unlock(&tp->wk.mutex);
+}
static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
{
@@ -1180,12 +1189,51 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
return value;
}
+static u16 rtl_get_events(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R16(IntrStatus);
+}
+
+static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W16(IntrStatus, bits);
+ mmiowb();
+}
+
+static void rtl_irq_disable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W16(IntrMask, 0);
+ mmiowb();
+}
+
+static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W16(IntrMask, bits);
+}
+
+#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
+#define RTL_EVENT_NAPI_TX (TxOK | TxErr)
+#define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
+
+static void rtl_irq_enable_all(struct rtl8169_private *tp)
+{
+ rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
+}
+
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- RTL_W16(IntrMask, 0x0000);
- RTL_W16(IntrStatus, tp->intr_event);
+ rtl_irq_disable(tp);
+ rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
RTL_R8(ChipCmd);
}
@@ -1276,9 +1324,6 @@ static void __rtl8169_check_link_status(struct net_device *dev,
struct rtl8169_private *tp,
void __iomem *ioaddr, bool pm)
{
- unsigned long flags;
-
- spin_lock_irqsave(&tp->lock, flags);
if (tp->link_ok(ioaddr)) {
rtl_link_chg_patch(tp);
/* This is to cancel a scheduled suspend if there's one. */
@@ -1293,7 +1338,6 @@ static void __rtl8169_check_link_status(struct net_device *dev,
if (pm)
pm_schedule_suspend(&tp->pci_dev->dev, 5000);
}
- spin_unlock_irqrestore(&tp->lock, flags);
}
static void rtl8169_check_link_status(struct net_device *dev,
@@ -1336,12 +1380,12 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
- spin_lock_irq(&tp->lock);
+ rtl_lock_work(tp);
wol->supported = WAKE_ANY;
wol->wolopts = __rtl8169_get_wol(tp);
- spin_unlock_irq(&tp->lock);
+ rtl_unlock_work(tp);
}
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
@@ -1378,14 +1422,15 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
- spin_lock_irq(&tp->lock);
+ rtl_lock_work(tp);
if (wol->wolopts)
tp->features |= RTL_FEATURE_WOL;
else
tp->features &= ~RTL_FEATURE_WOL;
__rtl8169_set_wol(tp, wol->wolopts);
- spin_unlock_irq(&tp->lock);
+
+ rtl_unlock_work(tp);
device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
@@ -1540,15 +1585,14 @@ out:
static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
- unsigned long flags;
int ret;
del_timer_sync(&tp->timer);
- spin_lock_irqsave(&tp->lock, flags);
+ rtl_lock_work(tp);
ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
cmd->duplex, cmd->advertising);
- spin_unlock_irqrestore(&tp->lock, flags);
+ rtl_unlock_work(tp);
return ret;
}
@@ -1568,33 +1612,51 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev,
return features;
}
-static int rtl8169_set_features(struct net_device *dev,
- netdev_features_t features)
+static void __rtl8169_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ netdev_features_t changed = features ^ dev->features;
void __iomem *ioaddr = tp->mmio_addr;
- unsigned long flags;
- spin_lock_irqsave(&tp->lock, flags);
+ if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
+ return;
- if (features & NETIF_F_RXCSUM)
- tp->cp_cmd |= RxChkSum;
- else
- tp->cp_cmd &= ~RxChkSum;
+ if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
+ if (features & NETIF_F_RXCSUM)
+ tp->cp_cmd |= RxChkSum;
+ else
+ tp->cp_cmd &= ~RxChkSum;
- if (dev->features & NETIF_F_HW_VLAN_RX)
- tp->cp_cmd |= RxVlan;
- else
- tp->cp_cmd &= ~RxVlan;
+ if (dev->features & NETIF_F_HW_VLAN_RX)
+ tp->cp_cmd |= RxVlan;
+ else
+ tp->cp_cmd &= ~RxVlan;
- RTL_W16(CPlusCmd, tp->cp_cmd);
- RTL_R16(CPlusCmd);
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+ RTL_R16(CPlusCmd);
+ }
+ if (changed & NETIF_F_RXALL) {
+ int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
+ if (features & NETIF_F_RXALL)
+ tmp |= (AcceptErr | AcceptRunt);
+ RTL_W32(RxConfig, tmp);
+ }
+}
- spin_unlock_irqrestore(&tp->lock, flags);
+static int rtl8169_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl_lock_work(tp);
+ __rtl8169_set_features(dev, features);
+ rtl_unlock_work(tp);
return 0;
}
+
static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
struct sk_buff *skb)
{
@@ -1643,14 +1705,12 @@ static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
- unsigned long flags;
int rc;
- spin_lock_irqsave(&tp->lock, flags);
-
+ rtl_lock_work(tp);
rc = tp->get_settings(dev, cmd);
+ rtl_unlock_work(tp);
- spin_unlock_irqrestore(&tp->lock, flags);
return rc;
}
@@ -1658,14 +1718,13 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
struct rtl8169_private *tp = netdev_priv(dev);
- unsigned long flags;
if (regs->len > R8169_REGS_SIZE)
regs->len = R8169_REGS_SIZE;
- spin_lock_irqsave(&tp->lock, flags);
+ rtl_lock_work(tp);
memcpy_fromio(p, tp->mmio_addr, regs->len);
- spin_unlock_irqrestore(&tp->lock, flags);
+ rtl_unlock_work(tp);
}
static u32 rtl8169_get_msglevel(struct net_device *dev)
@@ -3182,18 +3241,14 @@ static void rtl_hw_phy_config(struct net_device *dev)
}
}
-static void rtl8169_phy_timer(unsigned long __opaque)
+static void rtl_phy_work(struct rtl8169_private *tp)
{
- struct net_device *dev = (struct net_device *)__opaque;
- struct rtl8169_private *tp = netdev_priv(dev);
struct timer_list *timer = &tp->timer;
void __iomem *ioaddr = tp->mmio_addr;
unsigned long timeout = RTL8169_PHY_TIMEOUT;
assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
- spin_lock_irq(&tp->lock);
-
if (tp->phy_reset_pending(tp)) {
/*
* A busy loop could burn quite a few cycles on nowadays CPU.
@@ -3204,34 +3259,29 @@ static void rtl8169_phy_timer(unsigned long __opaque)
}
if (tp->link_ok(ioaddr))
- goto out_unlock;
+ return;
- netif_warn(tp, link, dev, "PHY reset until link up\n");
+ netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
tp->phy_reset_enable(tp);
out_mod_timer:
mod_timer(timer, jiffies + timeout);
-out_unlock:
- spin_unlock_irq(&tp->lock);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void rtl8169_netpoll(struct net_device *dev)
+static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
{
+ if (!test_and_set_bit(flag, tp->wk.flags))
+ schedule_work(&tp->wk.work);
+}
+
+static void rtl8169_phy_timer(unsigned long __opaque)
+{
+ struct net_device *dev = (struct net_device *)__opaque;
struct rtl8169_private *tp = netdev_priv(dev);
- struct pci_dev *pdev = tp->pci_dev;
- disable_irq(pdev->irq);
- rtl8169_interrupt(pdev->irq, dev);
- enable_irq(pdev->irq);
+ rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
}
-#endif
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
void __iomem *ioaddr)
@@ -3310,7 +3360,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
high = addr[4] | (addr[5] << 8);
- spin_lock_irq(&tp->lock);
+ rtl_lock_work(tp);
RTL_W8(Cfg9346, Cfg9346_Unlock);
@@ -3334,7 +3384,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
RTL_W8(Cfg9346, Cfg9346_Lock);
- spin_unlock_irq(&tp->lock);
+ rtl_unlock_work(tp);
}
static int rtl_set_mac_address(struct net_device *dev, void *p)
@@ -3384,69 +3434,6 @@ static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data
return -EOPNOTSUPP;
}
-static const struct rtl_cfg_info {
- void (*hw_start)(struct net_device *);
- unsigned int region;
- unsigned int align;
- u16 intr_event;
- u16 napi_event;
- unsigned features;
- u8 default_ver;
-} rtl_cfg_infos [] = {
- [RTL_CFG_0] = {
- .hw_start = rtl_hw_start_8169,
- .region = 1,
- .align = 0,
- .intr_event = SYSErr | LinkChg | RxOverflow |
- RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
- .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
- .features = RTL_FEATURE_GMII,
- .default_ver = RTL_GIGA_MAC_VER_01,
- },
- [RTL_CFG_1] = {
- .hw_start = rtl_hw_start_8168,
- .region = 2,
- .align = 8,
- .intr_event = SYSErr | LinkChg | RxOverflow |
- TxErr | TxOK | RxOK | RxErr,
- .napi_event = TxErr | TxOK | RxOK | RxOverflow,
- .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
- .default_ver = RTL_GIGA_MAC_VER_11,
- },
- [RTL_CFG_2] = {
- .hw_start = rtl_hw_start_8101,
- .region = 2,
- .align = 8,
- .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
- RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
- .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
- .features = RTL_FEATURE_MSI,
- .default_ver = RTL_GIGA_MAC_VER_13,
- }
-};
-
-/* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct rtl8169_private *tp,
- const struct rtl_cfg_info *cfg)
-{
- void __iomem *ioaddr = tp->mmio_addr;
- unsigned msi = 0;
- u8 cfg2;
-
- cfg2 = RTL_R8(Config2) & ~MSIEnable;
- if (cfg->features & RTL_FEATURE_MSI) {
- if (pci_enable_msi(tp->pci_dev)) {
- netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
- } else {
- cfg2 |= MSIEnable;
- msi = RTL_FEATURE_MSI;
- }
- }
- if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
- RTL_W8(Config2, cfg2);
- return msi;
-}
-
static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
{
if (tp->features & RTL_FEATURE_MSI) {
@@ -3455,25 +3442,6 @@ static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
}
}
-static const struct net_device_ops rtl8169_netdev_ops = {
- .ndo_open = rtl8169_open,
- .ndo_stop = rtl8169_close,
- .ndo_get_stats = rtl8169_get_stats,
- .ndo_start_xmit = rtl8169_start_xmit,
- .ndo_tx_timeout = rtl8169_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = rtl8169_change_mtu,
- .ndo_fix_features = rtl8169_fix_features,
- .ndo_set_features = rtl8169_set_features,
- .ndo_set_mac_address = rtl_set_mac_address,
- .ndo_do_ioctl = rtl8169_ioctl,
- .ndo_set_rx_mode = rtl_set_rx_mode,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = rtl8169_netpoll,
-#endif
-
-};
-
static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
{
struct mdio_ops *ops = &tp->mdio_ops;
@@ -3832,23 +3800,21 @@ static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- struct pci_dev *pdev = tp->pci_dev;
RTL_W8(MaxTxPacketSize, 0x3f);
RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
RTL_W8(Config4, RTL_R8(Config4) | 0x01);
- pci_write_config_byte(pdev, 0x79, 0x20);
+ rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- struct pci_dev *pdev = tp->pci_dev;
RTL_W8(MaxTxPacketSize, 0x0c);
RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
- pci_write_config_byte(pdev, 0x79, 0x50);
+ rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -3947,280 +3913,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
}
}
-static int __devinit
-rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
- const unsigned int region = cfg->region;
- struct rtl8169_private *tp;
- struct mii_if_info *mii;
- struct net_device *dev;
- void __iomem *ioaddr;
- int chipset, i;
- int rc;
-
- if (netif_msg_drv(&debug)) {
- printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
- MODULENAME, RTL8169_VERSION);
- }
-
- dev = alloc_etherdev(sizeof (*tp));
- if (!dev) {
- if (netif_msg_drv(&debug))
- dev_err(&pdev->dev, "unable to alloc new ethernet\n");
- rc = -ENOMEM;
- goto out;
- }
-
- SET_NETDEV_DEV(dev, &pdev->dev);
- dev->netdev_ops = &rtl8169_netdev_ops;
- tp = netdev_priv(dev);
- tp->dev = dev;
- tp->pci_dev = pdev;
- tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
-
- mii = &tp->mii;
- mii->dev = dev;
- mii->mdio_read = rtl_mdio_read;
- mii->mdio_write = rtl_mdio_write;
- mii->phy_id_mask = 0x1f;
- mii->reg_num_mask = 0x1f;
- mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
-
- /* disable ASPM completely as that cause random device stop working
- * problems as well as full system hangs for some PCIe devices users */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
- /* enable device (incl. PCI PM wakeup and hotplug setup) */
- rc = pci_enable_device(pdev);
- if (rc < 0) {
- netif_err(tp, probe, dev, "enable failure\n");
- goto err_out_free_dev_1;
- }
-
- if (pci_set_mwi(pdev) < 0)
- netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
-
- /* make sure PCI base addr 1 is MMIO */
- if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
- netif_err(tp, probe, dev,
- "region #%d not an MMIO resource, aborting\n",
- region);
- rc = -ENODEV;
- goto err_out_mwi_2;
- }
-
- /* check for weird/broken PCI region reporting */
- if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
- netif_err(tp, probe, dev,
- "Invalid PCI region size(s), aborting\n");
- rc = -ENODEV;
- goto err_out_mwi_2;
- }
-
- rc = pci_request_regions(pdev, MODULENAME);
- if (rc < 0) {
- netif_err(tp, probe, dev, "could not request regions\n");
- goto err_out_mwi_2;
- }
-
- tp->cp_cmd = RxChkSum;
-
- if ((sizeof(dma_addr_t) > 4) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
- tp->cp_cmd |= PCIDAC;
- dev->features |= NETIF_F_HIGHDMA;
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc < 0) {
- netif_err(tp, probe, dev, "DMA configuration failed\n");
- goto err_out_free_res_3;
- }
- }
-
- /* ioremap MMIO region */
- ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
- if (!ioaddr) {
- netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
- rc = -EIO;
- goto err_out_free_res_3;
- }
- tp->mmio_addr = ioaddr;
-
- if (!pci_is_pcie(pdev))
- netif_info(tp, probe, dev, "not PCI Express\n");
-
- /* Identify chip attached to board */
- rtl8169_get_mac_version(tp, dev, cfg->default_ver);
-
- rtl_init_rxcfg(tp);
-
- RTL_W16(IntrMask, 0x0000);
-
- rtl_hw_reset(tp);
-
- RTL_W16(IntrStatus, 0xffff);
-
- pci_set_master(pdev);
-
- /*
- * Pretend we are using VLANs; This bypasses a nasty bug where
- * Interrupts stop flowing on high load on 8110SCd controllers.
- */
- if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- tp->cp_cmd |= RxVlan;
-
- rtl_init_mdio_ops(tp);
- rtl_init_pll_power_ops(tp);
- rtl_init_jumbo_ops(tp);
-
- rtl8169_print_mac_version(tp);
-
- chipset = tp->mac_version;
- tp->txd_version = rtl_chip_infos[chipset].txd_version;
-
- RTL_W8(Cfg9346, Cfg9346_Unlock);
- RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
- RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
- if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
- tp->features |= RTL_FEATURE_WOL;
- if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
- tp->features |= RTL_FEATURE_WOL;
- tp->features |= rtl_try_msi(tp, cfg);
- RTL_W8(Cfg9346, Cfg9346_Lock);
-
- if (rtl_tbi_enabled(tp)) {
- tp->set_speed = rtl8169_set_speed_tbi;
- tp->get_settings = rtl8169_gset_tbi;
- tp->phy_reset_enable = rtl8169_tbi_reset_enable;
- tp->phy_reset_pending = rtl8169_tbi_reset_pending;
- tp->link_ok = rtl8169_tbi_link_ok;
- tp->do_ioctl = rtl_tbi_ioctl;
- } else {
- tp->set_speed = rtl8169_set_speed_xmii;
- tp->get_settings = rtl8169_gset_xmii;
- tp->phy_reset_enable = rtl8169_xmii_reset_enable;
- tp->phy_reset_pending = rtl8169_xmii_reset_pending;
- tp->link_ok = rtl8169_xmii_link_ok;
- tp->do_ioctl = rtl_xmii_ioctl;
- }
-
- spin_lock_init(&tp->lock);
-
- /* Get MAC address */
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = RTL_R8(MAC0 + i);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
-
- SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
- dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
- dev->irq = pdev->irq;
- dev->base_addr = (unsigned long) ioaddr;
-
- netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
-
- /* don't enable SG, IP_CSUM and TSO by default - it might not work
- * properly for all devices */
- dev->features |= NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_HIGHDMA;
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- /* 8110SCd requires hardware Rx VLAN - disallow toggling */
- dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
-
- tp->intr_mask = 0xffff;
- tp->hw_start = cfg->hw_start;
- tp->intr_event = cfg->intr_event;
- tp->napi_event = cfg->napi_event;
-
- tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
- ~(RxBOVF | RxFOVF) : ~0;
-
- init_timer(&tp->timer);
- tp->timer.data = (unsigned long) dev;
- tp->timer.function = rtl8169_phy_timer;
-
- tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
-
- rc = register_netdev(dev);
- if (rc < 0)
- goto err_out_msi_4;
-
- pci_set_drvdata(pdev, dev);
-
- netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
- rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr,
- (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
- if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
- netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
- "tx checksumming: %s]\n",
- rtl_chip_infos[chipset].jumbo_max,
- rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
- }
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
- tp->mac_version == RTL_GIGA_MAC_VER_28 ||
- tp->mac_version == RTL_GIGA_MAC_VER_31) {
- rtl8168_driver_start(tp);
- }
-
- device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
-
- if (pci_dev_run_wake(pdev))
- pm_runtime_put_noidle(&pdev->dev);
-
- netif_carrier_off(dev);
-
-out:
- return rc;
-
-err_out_msi_4:
- rtl_disable_msi(pdev, tp);
- iounmap(ioaddr);
-err_out_free_res_3:
- pci_release_regions(pdev);
-err_out_mwi_2:
- pci_clear_mwi(pdev);
- pci_disable_device(pdev);
-err_out_free_dev_1:
- free_netdev(dev);
- goto out;
-}
-
-static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
- tp->mac_version == RTL_GIGA_MAC_VER_28 ||
- tp->mac_version == RTL_GIGA_MAC_VER_31) {
- rtl8168_driver_stop(tp);
- }
-
- cancel_delayed_work_sync(&tp->task);
-
- unregister_netdev(dev);
-
- rtl_release_firmware(tp);
-
- if (pci_dev_run_wake(pdev))
- pm_runtime_get_noresume(&pdev->dev);
-
- /* restore original MAC address */
- rtl_rar_set(tp, dev->perm_addr);
-
- rtl_disable_msi(pdev, tp);
- rtl8169_release_board(pdev, dev, tp->mmio_addr);
- pci_set_drvdata(pdev, NULL);
-}
-
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
{
struct rtl_fw *rtl_fw;
@@ -4265,78 +3957,6 @@ static void rtl_request_firmware(struct rtl8169_private *tp)
rtl_request_uncached_firmware(tp);
}
-static int rtl8169_open(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
- struct pci_dev *pdev = tp->pci_dev;
- int retval = -ENOMEM;
-
- pm_runtime_get_sync(&pdev->dev);
-
- /*
- * Rx and Tx desscriptors needs 256 bytes alignment.
- * dma_alloc_coherent provides more.
- */
- tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
- &tp->TxPhyAddr, GFP_KERNEL);
- if (!tp->TxDescArray)
- goto err_pm_runtime_put;
-
- tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
- &tp->RxPhyAddr, GFP_KERNEL);
- if (!tp->RxDescArray)
- goto err_free_tx_0;
-
- retval = rtl8169_init_ring(dev);
- if (retval < 0)
- goto err_free_rx_1;
-
- INIT_DELAYED_WORK(&tp->task, NULL);
-
- smp_mb();
-
- rtl_request_firmware(tp);
-
- retval = request_irq(dev->irq, rtl8169_interrupt,
- (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
- dev->name, dev);
- if (retval < 0)
- goto err_release_fw_2;
-
- napi_enable(&tp->napi);
-
- rtl8169_init_phy(dev, tp);
-
- rtl8169_set_features(dev, dev->features);
-
- rtl_pll_power_up(tp);
-
- rtl_hw_start(dev);
-
- tp->saved_wolopts = 0;
- pm_runtime_put_noidle(&pdev->dev);
-
- rtl8169_check_link_status(dev, tp, ioaddr);
-out:
- return retval;
-
-err_release_fw_2:
- rtl_release_firmware(tp);
- rtl8169_rx_clear(tp);
-err_free_rx_1:
- dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
- tp->RxPhyAddr);
- tp->RxDescArray = NULL;
-err_free_tx_0:
- dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
- tp->TxPhyAddr);
- tp->TxDescArray = NULL;
-err_pm_runtime_put:
- pm_runtime_put_noidle(&pdev->dev);
- goto out;
-}
-
static void rtl_rx_close(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -4387,7 +4007,7 @@ static void rtl_hw_start(struct net_device *dev)
tp->hw_start(dev);
- netif_start_queue(dev);
+ rtl_irq_enable_all(tp);
}
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
@@ -4444,6 +4064,56 @@ static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
}
}
+static void rtl_set_rx_mode(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int rx_mode;
+ u32 tmp = 0;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
+ rx_mode =
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+ AcceptAllPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else {
+ struct netdev_hw_addr *ha;
+
+ rx_mode = AcceptBroadcast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ rx_mode |= AcceptMulticast;
+ }
+ }
+
+ if (dev->features & NETIF_F_RXALL)
+ rx_mode |= (AcceptErr | AcceptRunt);
+
+ tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
+
+ if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
+ u32 data = mc_filter[0];
+
+ mc_filter[0] = swab32(mc_filter[1]);
+ mc_filter[1] = swab32(data);
+ }
+
+ RTL_W32(MAR0 + 4, mc_filter[1]);
+ RTL_W32(MAR0 + 0, mc_filter[0]);
+
+ RTL_W32(RxConfig, tmp);
+}
+
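For reference, a minimal user-space sketch of how one multicast address selects its bit in the 64-bit MAR hash filter programmed above. The CRC loop is modeled on the classic bitwise ether_crc() helper (polynomial 0x04c11db7, octets fed LSB first) and __builtin_bswap32() stands in for swab32(); both are assumptions of this sketch, not something the patch itself adds.

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Bitwise Ethernet CRC as used for multicast hashing (sketch). */
static uint32_t ether_crc_sketch(size_t len, const uint8_t *addr)
{
	uint32_t crc = 0xffffffff;

	while (len--) {
		uint8_t octet = *addr++;
		int bit;

		for (bit = 0; bit < 8; bit++, octet >>= 1) {
			int mismatch = (crc >> 31) ^ (octet & 1);

			crc <<= 1;
			if (mismatch)
				crc ^= 0x04c11db7;
		}
	}
	return crc;
}

int main(void)
{
	/* all-hosts multicast group, purely as a demo address */
	static const uint8_t addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t mc_filter[2] = { 0, 0 };
	uint32_t lo;

	/* Top 6 CRC bits select one of 64 hash buckets; bit_nr >> 5 picks the
	 * 32-bit word, bit_nr & 31 the bit inside it. */
	int bit_nr = ether_crc_sketch(ETH_ALEN, addr) >> 26;

	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);

	/* Chips newer than RTL_GIGA_MAC_VER_06 want the two words exchanged
	 * and byte-swapped before the MAR0/MAR0+4 writes. */
	lo = mc_filter[0];
	mc_filter[0] = __builtin_bswap32(mc_filter[1]);
	mc_filter[1] = __builtin_bswap32(lo);

	printf("bucket %d -> MAR0 %08x, MAR0+4 %08x\n", bit_nr,
	       (unsigned int)mc_filter[0], (unsigned int)mc_filter[1]);
	return 0;
}

Building and running it simply prints which MAR word and bit the demo address lands in.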
static void rtl_hw_start_8169(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -4514,9 +4184,6 @@ static void rtl_hw_start_8169(struct net_device *dev)
/* no early-rx interrupts */
RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
-
- /* Enable all known interrupts by setting the interrupt mask. */
- RTL_W16(IntrMask, tp->intr_event);
}
static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
@@ -4896,8 +4563,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
/* Work around for RxFIFO overflow. */
if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
- tp->intr_event |= RxFIFOOver | PCSTimeout;
- tp->intr_event &= ~RxOverflow;
+ tp->event_slow |= RxFIFOOver | PCSTimeout;
+ tp->event_slow &= ~RxOverflow;
}
rtl_set_rx_tx_desc_registers(tp, ioaddr);
@@ -4985,8 +4652,6 @@ static void rtl_hw_start_8168(struct net_device *dev)
RTL_W8(Cfg9346, Cfg9346_Lock);
RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
-
- RTL_W16(IntrMask, tp->intr_event);
}
#define R810X_CPCMD_QUIRK_MASK (\
@@ -5085,10 +4750,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
- if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
- tp->intr_event &= ~RxFIFOOver;
- tp->napi_event &= ~RxFIFOOver;
- }
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
+ tp->event_slow &= ~RxFIFOOver;
if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
tp->mac_version == RTL_GIGA_MAC_VER_16) {
@@ -5144,8 +4807,6 @@ static void rtl_hw_start_8101(struct net_device *dev)
rtl_set_rx_mode(dev);
RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
-
- RTL_W16(IntrMask, tp->intr_event);
}
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
@@ -5336,94 +4997,37 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
tp->cur_tx = tp->dirty_tx = 0;
+ netdev_reset_queue(tp->dev);
}
-static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- PREPARE_DELAYED_WORK(&tp->task, task);
- schedule_delayed_work(&tp->task, 4);
-}
-
-static void rtl8169_wait_for_quiescence(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
-
- synchronize_irq(dev->irq);
-
- /* Wait for any pending NAPI task to complete */
- napi_disable(&tp->napi);
-
- rtl8169_irq_mask_and_ack(tp);
-
- tp->intr_mask = 0xffff;
- RTL_W16(IntrMask, tp->intr_event);
- napi_enable(&tp->napi);
-}
-
-static void rtl8169_reinit_task(struct work_struct *work)
+static void rtl_reset_work(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp =
- container_of(work, struct rtl8169_private, task.work);
- struct net_device *dev = tp->dev;
- int ret;
-
- rtnl_lock();
-
- if (!netif_running(dev))
- goto out_unlock;
-
- rtl8169_wait_for_quiescence(dev);
- rtl8169_close(dev);
-
- ret = rtl8169_open(dev);
- if (unlikely(ret < 0)) {
- if (net_ratelimit())
- netif_err(tp, drv, dev,
- "reinit failure (status = %d). Rescheduling\n",
- ret);
- rtl8169_schedule_work(dev, rtl8169_reinit_task);
- }
-
-out_unlock:
- rtnl_unlock();
-}
-
-static void rtl8169_reset_task(struct work_struct *work)
-{
- struct rtl8169_private *tp =
- container_of(work, struct rtl8169_private, task.work);
struct net_device *dev = tp->dev;
int i;
- rtnl_lock();
-
- if (!netif_running(dev))
- goto out_unlock;
+ napi_disable(&tp->napi);
+ netif_stop_queue(dev);
+ synchronize_sched();
rtl8169_hw_reset(tp);
- rtl8169_wait_for_quiescence(dev);
-
for (i = 0; i < NUM_RX_DESC; i++)
rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
rtl8169_tx_clear(tp);
rtl8169_init_ring_indexes(tp);
+ napi_enable(&tp->napi);
rtl_hw_start(dev);
netif_wake_queue(dev);
rtl8169_check_link_status(dev, tp, tp->mmio_addr);
-
-out_unlock:
- rtnl_unlock();
}
static void rtl8169_tx_timeout(struct net_device *dev)
{
- rtl8169_schedule_work(dev, rtl8169_reset_task);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
@@ -5548,6 +5152,10 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
+ netdev_sent_queue(dev, skb->len);
+
+ skb_tx_timestamp(skb);
+
wmb();
/* Anti gcc 2.95.3 bugware (sic) */
@@ -5560,9 +5168,22 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
RTL_W8(TxPoll, NPQ);
+ mmiowb();
+
if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
+ /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
+ * not miss a ring update when it notices a stopped queue.
+ */
+ smp_wmb();
netif_stop_queue(dev);
- smp_rmb();
+ /* Sync with rtl_tx:
+ * - publish queue status and cur_tx ring index (write barrier)
+ * - refresh dirty_tx ring index (read barrier).
+ * Even if the current thread has a pessimistic view of the
+ * ring status and forgets to wake up the queue, a racing
+ * rtl_tx thread can't.
+ */
+ smp_mb();
if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
netif_wake_queue(dev);
}
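For reference, a minimal C11 sketch of the lockless stop/wake handshake that the barrier in rtl8169_start_xmit() above pairs with the one in rtl_tx() below. atomic_thread_fence(memory_order_seq_cst) stands in for smp_mb(); RING_SIZE, HIGH_WATER and the index bookkeeping are simplified stand-ins, and the descriptor writes themselves (the smp_wmb() above) are not modeled.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE	64
#define HIGH_WATER	17	/* stand-in for MAX_SKB_FRAGS + 1 */

static _Atomic unsigned int cur_tx;	/* producer index */
static _Atomic unsigned int dirty_tx;	/* consumer index */
static atomic_bool queue_stopped;

static unsigned int slots_avail(void)
{
	return RING_SIZE - (atomic_load_explicit(&cur_tx, memory_order_relaxed) -
			    atomic_load_explicit(&dirty_tx, memory_order_relaxed));
}

/* Producer (start_xmit side): publish one slot, stop the queue when the ring
 * looks full, then re-check after a full barrier so a racing consumer cannot
 * leave the queue stopped with space available. */
static void xmit_one(void)
{
	atomic_fetch_add_explicit(&cur_tx, 1, memory_order_relaxed);

	if (slots_avail() < HIGH_WATER) {
		atomic_store_explicit(&queue_stopped, true, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
		if (slots_avail() >= HIGH_WATER)
			atomic_store_explicit(&queue_stopped, false,
					      memory_order_relaxed);
	}
}

/* Consumer (rtl_tx side): retire completed slots, then, after a full barrier,
 * wake the queue if the producer stopped it and space is back. */
static void tx_complete(unsigned int done)
{
	atomic_fetch_add_explicit(&dirty_tx, done, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);		/* smp_mb() */
	if (atomic_load_explicit(&queue_stopped, memory_order_relaxed) &&
	    slots_avail() >= HIGH_WATER)
		atomic_store_explicit(&queue_stopped, false, memory_order_relaxed);
}

int main(void)
{
	for (int i = 0; i < RING_SIZE; i++)
		xmit_one();
	printf("stopped after filling ring: %d\n", atomic_load(&queue_stopped));
	tx_complete(RING_SIZE);
	printf("stopped after completions:  %d\n", atomic_load(&queue_stopped));
	return 0;
}

The point of the two full barriers is that at least one side always observes the other's update, so the queue can never stay stopped while ring space is available.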
@@ -5626,14 +5247,19 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
rtl8169_hw_reset(tp);
- rtl8169_schedule_work(dev, rtl8169_reinit_task);
+ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
-static void rtl8169_tx_interrupt(struct net_device *dev,
- struct rtl8169_private *tp,
- void __iomem *ioaddr)
+struct rtl_txc {
+ int packets;
+ int bytes;
+};
+
+static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
+ struct rtl8169_stats *tx_stats = &tp->tx_stats;
unsigned int dirty_tx, tx_left;
+ struct rtl_txc txc = { 0, 0 };
dirty_tx = tp->dirty_tx;
smp_rmb();
@@ -5652,18 +5278,34 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
tp->TxDescArray + entry);
if (status & LastFrag) {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += tx_skb->skb->len;
- dev_kfree_skb(tx_skb->skb);
+ struct sk_buff *skb = tx_skb->skb;
+
+ txc.packets++;
+ txc.bytes += skb->len;
+ dev_kfree_skb(skb);
tx_skb->skb = NULL;
}
dirty_tx++;
tx_left--;
}
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->packets += txc.packets;
+ tx_stats->bytes += txc.bytes;
+ u64_stats_update_end(&tx_stats->syncp);
+
+ netdev_completed_queue(dev, txc.packets, txc.bytes);
+
if (tp->dirty_tx != dirty_tx) {
tp->dirty_tx = dirty_tx;
- smp_wmb();
+ /* Sync with rtl8169_start_xmit:
+ * - publish dirty_tx ring index (write barrier)
+ * - refresh cur_tx ring index and queue status (read barrier)
+ * Even if the current thread misses the stopped queue
+ * condition, a racing xmit thread can only have a correct
+ * view of the ring status.
+ */
+ smp_mb();
if (netif_queue_stopped(dev) &&
(TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
netif_wake_queue(dev);
@@ -5674,9 +5316,11 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
* of start_xmit activity is detected (if it is not detected,
* it is slow enough). -- FR
*/
- smp_rmb();
- if (tp->cur_tx != dirty_tx)
+ if (tp->cur_tx != dirty_tx) {
+ void __iomem *ioaddr = tp->mmio_addr;
+
RTL_W8(TxPoll, NPQ);
+ }
}
}
@@ -5715,9 +5359,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
return skb;
}
-static int rtl8169_rx_interrupt(struct net_device *dev,
- struct rtl8169_private *tp,
- void __iomem *ioaddr, u32 budget)
+static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
unsigned int cur_rx, rx_left;
unsigned int count;
@@ -5745,14 +5387,26 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
if (status & RxCRC)
dev->stats.rx_crc_errors++;
if (status & RxFOVF) {
- rtl8169_schedule_work(dev, rtl8169_reset_task);
+ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
dev->stats.rx_fifo_errors++;
}
+ if ((status & (RxRUNT | RxCRC)) &&
+ !(status & (RxRWT | RxFOVF)) &&
+ (dev->features & NETIF_F_RXALL))
+ goto process_pkt;
+
rtl8169_mark_to_asic(desc, rx_buf_sz);
} else {
struct sk_buff *skb;
- dma_addr_t addr = le64_to_cpu(desc->addr);
- int pkt_size = (status & 0x00003fff) - 4;
+ dma_addr_t addr;
+ int pkt_size;
+
+process_pkt:
+ addr = le64_to_cpu(desc->addr);
+ if (likely(!(dev->features & NETIF_F_RXFCS)))
+ pkt_size = (status & 0x00003fff) - 4;
+ else
+ pkt_size = status & 0x00003fff;
/*
* The driver does not support incoming fragmented
@@ -5782,8 +5436,10 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
napi_gro_receive(&tp->napi, skb);
- dev->stats.rx_bytes += pkt_size;
- dev->stats.rx_packets++;
+ u64_stats_update_begin(&tp->rx_stats.syncp);
+ tp->rx_stats.packets++;
+ tp->rx_stats.bytes += pkt_size;
+ u64_stats_update_end(&tp->rx_stats.syncp);
}
/* Work around for AMD platform. */
@@ -5806,101 +5462,120 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
int handled = 0;
- int status;
+ u16 status;
- /* loop handling interrupts until we have no new ones or
- * we hit a invalid/hotplug case.
- */
- status = RTL_R16(IntrStatus);
- while (status && status != 0xffff) {
- status &= tp->intr_event;
- if (!status)
- break;
+ status = rtl_get_events(tp);
+ if (status && status != 0xffff) {
+ status &= RTL_EVENT_NAPI | tp->event_slow;
+ if (status) {
+ handled = 1;
- handled = 1;
+ rtl_irq_disable(tp);
+ napi_schedule(&tp->napi);
+ }
+ }
+ return IRQ_RETVAL(handled);
+}
- /* Handle all of the error cases first. These will reset
- * the chip, so just exit the loop.
- */
- if (unlikely(!netif_running(dev))) {
- rtl8169_hw_reset(tp);
+/*
+ * Workqueue context.
+ */
+static void rtl_slow_event_work(struct rtl8169_private *tp)
+{
+ struct net_device *dev = tp->dev;
+ u16 status;
+
+ status = rtl_get_events(tp) & tp->event_slow;
+ rtl_ack_events(tp, status);
+
+ if (unlikely(status & RxFIFOOver)) {
+ switch (tp->mac_version) {
+ /* Work around for rx fifo overflow */
+ case RTL_GIGA_MAC_VER_11:
+ netif_stop_queue(dev);
+ /* XXX - Hack alert. See rtl_task(). */
+ set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
+ default:
break;
}
+ }
- if (unlikely(status & RxFIFOOver)) {
- switch (tp->mac_version) {
- /* Work around for rx fifo overflow */
- case RTL_GIGA_MAC_VER_11:
- netif_stop_queue(dev);
- rtl8169_tx_timeout(dev);
- goto done;
- default:
- break;
- }
- }
+ if (unlikely(status & SYSErr))
+ rtl8169_pcierr_interrupt(dev);
- if (unlikely(status & SYSErr)) {
- rtl8169_pcierr_interrupt(dev);
- break;
- }
+ if (status & LinkChg)
+ __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
- if (status & LinkChg)
- __rtl8169_check_link_status(dev, tp, ioaddr, true);
+ napi_disable(&tp->napi);
+ rtl_irq_disable(tp);
- /* We need to see the lastest version of tp->intr_mask to
- * avoid ignoring an MSI interrupt and having to wait for
- * another event which may never come.
- */
- smp_rmb();
- if (status & tp->intr_mask & tp->napi_event) {
- RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
- tp->intr_mask = ~tp->napi_event;
+ napi_enable(&tp->napi);
+ napi_schedule(&tp->napi);
+}
- if (likely(napi_schedule_prep(&tp->napi)))
- __napi_schedule(&tp->napi);
- else
- netif_info(tp, intr, dev,
- "interrupt %04x in poll\n", status);
- }
+static void rtl_task(struct work_struct *work)
+{
+ static const struct {
+ int bitnr;
+ void (*action)(struct rtl8169_private *);
+ } rtl_work[] = {
+ /* XXX - keep rtl_slow_event_work() as first element. */
+ { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
+ { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
+ { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
+ };
+ struct rtl8169_private *tp =
+ container_of(work, struct rtl8169_private, wk.work);
+ struct net_device *dev = tp->dev;
+ int i;
- /* We only get a new MSI interrupt when all active irq
- * sources on the chip have been acknowledged. So, ack
- * everything we've seen and check if new sources have become
- * active to avoid blocking all interrupts from the chip.
- */
- RTL_W16(IntrStatus,
- (status & RxFIFOOver) ? (status | RxOverflow) : status);
- status = RTL_R16(IntrStatus);
+ rtl_lock_work(tp);
+
+ if (!netif_running(dev) ||
+ !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
+ goto out_unlock;
+
+ for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
+ bool pending;
+
+ pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
+ if (pending)
+ rtl_work[i].action(tp);
}
-done:
- return IRQ_RETVAL(handled);
+
+out_unlock:
+ rtl_unlock_work(tp);
}
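For reference, a minimal sketch of the flag-bit dispatch pattern rtl_task() uses above: event sources set a pending bit and kick a single work item, and the worker walks a table of {bit, handler} pairs. C11 atomics stand in for set_bit()/test_and_clear_bit(); the mutex, the netif_running()/RTL_FLAG_TASK_ENABLED checks and the real handlers are omitted, so the names below are illustrative only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum { TASK_SLOW, TASK_RESET, TASK_PHY };

static atomic_ulong task_flags;

static void do_slow(void)  { puts("slow events"); }
static void do_reset(void) { puts("reset"); }
static void do_phy(void)   { puts("phy"); }

/* Analogue of rtl_schedule_task(): mark the work as pending; the real driver
 * then queues the single work item. */
static void schedule_task(int bitnr)
{
	atomic_fetch_or(&task_flags, 1UL << bitnr);
}

/* Analogue of test_and_clear_bit() on the flags word. */
static bool test_and_clear(int bitnr)
{
	unsigned long mask = 1UL << bitnr;

	return atomic_fetch_and(&task_flags, ~mask) & mask;
}

/* Analogue of rtl_task(): run every pending action, slow events first. */
static void task_worker(void)
{
	static const struct {
		int bitnr;
		void (*action)(void);
	} work[] = {
		{ TASK_SLOW,  do_slow  },
		{ TASK_RESET, do_reset },
		{ TASK_PHY,   do_phy   },
	};

	for (size_t i = 0; i < sizeof(work) / sizeof(work[0]); i++)
		if (test_and_clear(work[i].bitnr))
			work[i].action();
}

int main(void)
{
	schedule_task(TASK_RESET);
	schedule_task(TASK_SLOW);
	task_worker();		/* prints "slow events" then "reset" */
	return 0;
}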
static int rtl8169_poll(struct napi_struct *napi, int budget)
{
struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
struct net_device *dev = tp->dev;
- void __iomem *ioaddr = tp->mmio_addr;
- int work_done;
+ u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
+ int work_done = 0;
+ u16 status;
+
+ status = rtl_get_events(tp);
+ rtl_ack_events(tp, status & ~tp->event_slow);
+
+ if (status & RTL_EVENT_NAPI_RX)
+ work_done = rtl_rx(dev, tp, (u32) budget);
- work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget);
- rtl8169_tx_interrupt(dev, tp, ioaddr);
+ if (status & RTL_EVENT_NAPI_TX)
+ rtl_tx(dev, tp);
+
+ if (status & tp->event_slow) {
+ enable_mask &= ~tp->event_slow;
+
+ rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
+ }
if (work_done < budget) {
napi_complete(napi);
- /* We need for force the visibility of tp->intr_mask
- * for other CPUs, as we can loose an MSI interrupt
- * and potentially wait for a retransmit timeout if we don't.
- * The posted write to IntrMask is safe, as it will
- * eventually make it to the chip and we won't loose anything
- * until it does.
- */
- tp->intr_mask = 0xffff;
- wmb();
- RTL_W16(IntrMask, tp->intr_event);
+ rtl_irq_enable(tp, enable_mask);
+ mmiowb();
}
return work_done;
@@ -5924,26 +5599,19 @@ static void rtl8169_down(struct net_device *dev)
del_timer_sync(&tp->timer);
- netif_stop_queue(dev);
-
napi_disable(&tp->napi);
-
- spin_lock_irq(&tp->lock);
+ netif_stop_queue(dev);
rtl8169_hw_reset(tp);
/*
* At this point device interrupts can not be enabled in any function,
- * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task,
- * rtl8169_reinit_task) and napi is disabled (rtl8169_poll).
+ * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
+ * and napi is disabled (rtl8169_poll).
*/
rtl8169_rx_missed(dev, ioaddr);
- spin_unlock_irq(&tp->lock);
-
- synchronize_irq(dev->irq);
-
/* Give a racing hard_start_xmit a few cycles to complete. */
- synchronize_sched(); /* FIXME: should this be synchronize_irq()? */
+ synchronize_sched();
rtl8169_tx_clear(tp);
@@ -5962,9 +5630,13 @@ static int rtl8169_close(struct net_device *dev)
/* Update counters before going down */
rtl8169_update_counters(dev);
+ rtl_lock_work(tp);
+ clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+
rtl8169_down(dev);
+ rtl_unlock_work(tp);
- free_irq(dev->irq, dev);
+ free_irq(pdev->irq, dev);
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
@@ -5978,77 +5650,127 @@ static int rtl8169_close(struct net_device *dev)
return 0;
}
-static void rtl_set_rx_mode(struct net_device *dev)
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void rtl8169_netpoll(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl8169_interrupt(tp->pci_dev->irq, dev);
+}
+#endif
+
+static int rtl_open(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
- unsigned long flags;
- u32 mc_filter[2]; /* Multicast hash filter */
- int rx_mode;
- u32 tmp = 0;
+ struct pci_dev *pdev = tp->pci_dev;
+ int retval = -ENOMEM;
- if (dev->flags & IFF_PROMISC) {
- /* Unconditionally log net taps. */
- netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
- rx_mode =
- AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
- AcceptAllPhys;
- mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
- (dev->flags & IFF_ALLMULTI)) {
- /* Too many to filter perfectly -- accept all multicasts. */
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
- mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else {
- struct netdev_hw_addr *ha;
+ pm_runtime_get_sync(&pdev->dev);
- rx_mode = AcceptBroadcast | AcceptMyPhys;
- mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(ha, dev) {
- int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- rx_mode |= AcceptMulticast;
- }
- }
+ /*
+ * Rx and Tx descriptors need 256-byte alignment.
+ * dma_alloc_coherent provides more.
+ */
+ tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
+ &tp->TxPhyAddr, GFP_KERNEL);
+ if (!tp->TxDescArray)
+ goto err_pm_runtime_put;
- spin_lock_irqsave(&tp->lock, flags);
+ tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
+ &tp->RxPhyAddr, GFP_KERNEL);
+ if (!tp->RxDescArray)
+ goto err_free_tx_0;
- tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
+ retval = rtl8169_init_ring(dev);
+ if (retval < 0)
+ goto err_free_rx_1;
- if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
- u32 data = mc_filter[0];
+ INIT_WORK(&tp->wk.work, rtl_task);
- mc_filter[0] = swab32(mc_filter[1]);
- mc_filter[1] = swab32(data);
- }
+ smp_mb();
- RTL_W32(MAR0 + 4, mc_filter[1]);
- RTL_W32(MAR0 + 0, mc_filter[0]);
+ rtl_request_firmware(tp);
- RTL_W32(RxConfig, tmp);
+ retval = request_irq(pdev->irq, rtl8169_interrupt,
+ (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
+ dev->name, dev);
+ if (retval < 0)
+ goto err_release_fw_2;
+
+ rtl_lock_work(tp);
+
+ set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+
+ napi_enable(&tp->napi);
- spin_unlock_irqrestore(&tp->lock, flags);
+ rtl8169_init_phy(dev, tp);
+
+ __rtl8169_set_features(dev, dev->features);
+
+ rtl_pll_power_up(tp);
+
+ rtl_hw_start(dev);
+
+ netif_start_queue(dev);
+
+ rtl_unlock_work(tp);
+
+ tp->saved_wolopts = 0;
+ pm_runtime_put_noidle(&pdev->dev);
+
+ rtl8169_check_link_status(dev, tp, ioaddr);
+out:
+ return retval;
+
+err_release_fw_2:
+ rtl_release_firmware(tp);
+ rtl8169_rx_clear(tp);
+err_free_rx_1:
+ dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
+ tp->RxDescArray = NULL;
+err_free_tx_0:
+ dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+ tp->TxPhyAddr);
+ tp->TxDescArray = NULL;
+err_pm_runtime_put:
+ pm_runtime_put_noidle(&pdev->dev);
+ goto out;
}
-/**
- * rtl8169_get_stats - Get rtl8169 read/write statistics
- * @dev: The Ethernet Device to get statistics for
- *
- * Get TX/RX statistics for rtl8169
- */
-static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
- unsigned long flags;
+ unsigned int start;
- if (netif_running(dev)) {
- spin_lock_irqsave(&tp->lock, flags);
+ if (netif_running(dev))
rtl8169_rx_missed(dev, ioaddr);
- spin_unlock_irqrestore(&tp->lock, flags);
- }
- return &dev->stats;
+ do {
+ start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
+ stats->rx_packets = tp->rx_stats.packets;
+ stats->rx_bytes = tp->rx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
+ stats->tx_packets = tp->tx_stats.packets;
+ stats->tx_bytes = tp->tx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
+
+ stats->rx_dropped = dev->stats.rx_dropped;
+ stats->tx_dropped = dev->stats.tx_dropped;
+ stats->rx_length_errors = dev->stats.rx_length_errors;
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->rx_crc_errors = dev->stats.rx_crc_errors;
+ stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
+ stats->rx_missed_errors = dev->stats.rx_missed_errors;
+
+ return stats;
}
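For reference, a minimal C11 sketch of the sequence-counter protocol behind u64_stats_update_begin()/_end() and the fetch_begin/fetch_retry loop above. It assumes a single writer per counter pair (as in the driver, where rtl_tx() and rtl_rx() run from the NAPI handler) and uses default, sequentially consistent atomics for simplicity; the kernel helpers use cheaper barriers and compile away entirely on 64-bit builds.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct dir_stats {
	atomic_uint seq;		/* odd while an update is in flight */
	_Atomic uint64_t packets;
	_Atomic uint64_t bytes;
};

/* Writer side: what rtl_tx()/rtl_rx() do around their counter updates. */
static void stats_update(struct dir_stats *s, uint64_t pkts, uint64_t len)
{
	atomic_fetch_add(&s->seq, 1);	/* update_begin: seq goes odd */
	atomic_fetch_add(&s->packets, pkts);
	atomic_fetch_add(&s->bytes, len);
	atomic_fetch_add(&s->seq, 1);	/* update_end: seq goes even again */
}

/* Reader side: the retry loop in rtl8169_get_stats64(). */
static void stats_snapshot(struct dir_stats *s, uint64_t *pkts, uint64_t *len)
{
	unsigned int start;

	do {
		while ((start = atomic_load(&s->seq)) & 1)
			;		/* writer in progress, try again */
		*pkts = atomic_load(&s->packets);
		*len  = atomic_load(&s->bytes);
	} while (atomic_load(&s->seq) != start);
}

int main(void)
{
	struct dir_stats rx = { 0 };
	uint64_t pkts, bytes;

	stats_update(&rx, 1, 1514);
	stats_snapshot(&rx, &pkts, &bytes);
	printf("%llu packets, %llu bytes\n",
	       (unsigned long long)pkts, (unsigned long long)bytes);
	return 0;
}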
static void rtl8169_net_suspend(struct net_device *dev)
@@ -6058,10 +5780,15 @@ static void rtl8169_net_suspend(struct net_device *dev)
if (!netif_running(dev))
return;
- rtl_pll_power_down(tp);
-
netif_device_detach(dev);
netif_stop_queue(dev);
+
+ rtl_lock_work(tp);
+ napi_disable(&tp->napi);
+ clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ rtl_unlock_work(tp);
+
+ rtl_pll_power_down(tp);
}
#ifdef CONFIG_PM
@@ -6084,7 +5811,9 @@ static void __rtl8169_resume(struct net_device *dev)
rtl_pll_power_up(tp);
- rtl8169_schedule_work(dev, rtl8169_reset_task);
+ set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+
+ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
static int rtl8169_resume(struct device *device)
@@ -6110,10 +5839,10 @@ static int rtl8169_runtime_suspend(struct device *device)
if (!tp->TxDescArray)
return 0;
- spin_lock_irq(&tp->lock);
+ rtl_lock_work(tp);
tp->saved_wolopts = __rtl8169_get_wol(tp);
__rtl8169_set_wol(tp, WAKE_ANY);
- spin_unlock_irq(&tp->lock);
+ rtl_unlock_work(tp);
rtl8169_net_suspend(dev);
@@ -6129,10 +5858,10 @@ static int rtl8169_runtime_resume(struct device *device)
if (!tp->TxDescArray)
return 0;
- spin_lock_irq(&tp->lock);
+ rtl_lock_work(tp);
__rtl8169_set_wol(tp, tp->saved_wolopts);
tp->saved_wolopts = 0;
- spin_unlock_irq(&tp->lock);
+ rtl_unlock_work(tp);
rtl8169_init_phy(dev, tp);
@@ -6203,12 +5932,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
/* Restore original MAC address */
rtl_rar_set(tp, dev->perm_addr);
- spin_lock_irq(&tp->lock);
-
rtl8169_hw_reset(tp);
- spin_unlock_irq(&tp->lock);
-
if (system_state == SYSTEM_POWER_OFF) {
if (__rtl8169_get_wol(tp) & WAKE_ANY) {
rtl_wol_suspend_quirk(tp);
@@ -6222,11 +5947,358 @@ static void rtl_shutdown(struct pci_dev *pdev)
pm_runtime_put_noidle(d);
}
+static void __devexit rtl_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) {
+ rtl8168_driver_stop(tp);
+ }
+
+ cancel_work_sync(&tp->wk.work);
+
+ unregister_netdev(dev);
+
+ rtl_release_firmware(tp);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
+
+ /* restore original MAC address */
+ rtl_rar_set(tp, dev->perm_addr);
+
+ rtl_disable_msi(pdev, tp);
+ rtl8169_release_board(pdev, dev, tp->mmio_addr);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static const struct net_device_ops rtl_netdev_ops = {
+ .ndo_open = rtl_open,
+ .ndo_stop = rtl8169_close,
+ .ndo_get_stats64 = rtl8169_get_stats64,
+ .ndo_start_xmit = rtl8169_start_xmit,
+ .ndo_tx_timeout = rtl8169_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = rtl8169_change_mtu,
+ .ndo_fix_features = rtl8169_fix_features,
+ .ndo_set_features = rtl8169_set_features,
+ .ndo_set_mac_address = rtl_set_mac_address,
+ .ndo_do_ioctl = rtl8169_ioctl,
+ .ndo_set_rx_mode = rtl_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = rtl8169_netpoll,
+#endif
+
+};
+
+static const struct rtl_cfg_info {
+ void (*hw_start)(struct net_device *);
+ unsigned int region;
+ unsigned int align;
+ u16 event_slow;
+ unsigned features;
+ u8 default_ver;
+} rtl_cfg_infos [] = {
+ [RTL_CFG_0] = {
+ .hw_start = rtl_hw_start_8169,
+ .region = 1,
+ .align = 0,
+ .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
+ .features = RTL_FEATURE_GMII,
+ .default_ver = RTL_GIGA_MAC_VER_01,
+ },
+ [RTL_CFG_1] = {
+ .hw_start = rtl_hw_start_8168,
+ .region = 2,
+ .align = 8,
+ .event_slow = SYSErr | LinkChg | RxOverflow,
+ .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
+ .default_ver = RTL_GIGA_MAC_VER_11,
+ },
+ [RTL_CFG_2] = {
+ .hw_start = rtl_hw_start_8101,
+ .region = 2,
+ .align = 8,
+ .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
+ PCSTimeout,
+ .features = RTL_FEATURE_MSI,
+ .default_ver = RTL_GIGA_MAC_VER_13,
+ }
+};
+
+/* Cfg9346_Unlock assumed. */
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
+ const struct rtl_cfg_info *cfg)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned msi = 0;
+ u8 cfg2;
+
+ cfg2 = RTL_R8(Config2) & ~MSIEnable;
+ if (cfg->features & RTL_FEATURE_MSI) {
+ if (pci_enable_msi(tp->pci_dev)) {
+ netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
+ } else {
+ cfg2 |= MSIEnable;
+ msi = RTL_FEATURE_MSI;
+ }
+ }
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+ RTL_W8(Config2, cfg2);
+ return msi;
+}
+
+static int __devinit
+rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
+ const unsigned int region = cfg->region;
+ struct rtl8169_private *tp;
+ struct mii_if_info *mii;
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ int chipset, i;
+ int rc;
+
+ if (netif_msg_drv(&debug)) {
+ printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
+ MODULENAME, RTL8169_VERSION);
+ }
+
+ dev = alloc_etherdev(sizeof (*tp));
+ if (!dev) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->netdev_ops = &rtl_netdev_ops;
+ tp = netdev_priv(dev);
+ tp->dev = dev;
+ tp->pci_dev = pdev;
+ tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
+
+ mii = &tp->mii;
+ mii->dev = dev;
+ mii->mdio_read = rtl_mdio_read;
+ mii->mdio_write = rtl_mdio_write;
+ mii->phy_id_mask = 0x1f;
+ mii->reg_num_mask = 0x1f;
+ mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
+
+ /* Disable ASPM completely, as it causes random device-stops-working
+ * problems as well as full system hangs for some PCIe device users. */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ /* enable device (incl. PCI PM wakeup and hotplug setup) */
+ rc = pci_enable_device(pdev);
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "enable failure\n");
+ goto err_out_free_dev_1;
+ }
+
+ if (pci_set_mwi(pdev) < 0)
+ netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
+
+ /* make sure PCI base addr 1 is MMIO */
+ if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
+ netif_err(tp, probe, dev,
+ "region #%d not an MMIO resource, aborting\n",
+ region);
+ rc = -ENODEV;
+ goto err_out_mwi_2;
+ }
+
+ /* check for weird/broken PCI region reporting */
+ if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
+ netif_err(tp, probe, dev,
+ "Invalid PCI region size(s), aborting\n");
+ rc = -ENODEV;
+ goto err_out_mwi_2;
+ }
+
+ rc = pci_request_regions(pdev, MODULENAME);
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "could not request regions\n");
+ goto err_out_mwi_2;
+ }
+
+ tp->cp_cmd = RxChkSum;
+
+ if ((sizeof(dma_addr_t) > 4) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
+ tp->cp_cmd |= PCIDAC;
+ dev->features |= NETIF_F_HIGHDMA;
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "DMA configuration failed\n");
+ goto err_out_free_res_3;
+ }
+ }
+
+ /* ioremap MMIO region */
+ ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
+ if (!ioaddr) {
+ netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
+ rc = -EIO;
+ goto err_out_free_res_3;
+ }
+ tp->mmio_addr = ioaddr;
+
+ if (!pci_is_pcie(pdev))
+ netif_info(tp, probe, dev, "not PCI Express\n");
+
+ /* Identify chip attached to board */
+ rtl8169_get_mac_version(tp, dev, cfg->default_ver);
+
+ rtl_init_rxcfg(tp);
+
+ rtl_irq_disable(tp);
+
+ rtl_hw_reset(tp);
+
+ rtl_ack_events(tp, 0xffff);
+
+ pci_set_master(pdev);
+
+ /*
+ * Pretend we are using VLANs; this bypasses a nasty bug where
+ * interrupts stop flowing under high load on 8110SCd controllers.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ tp->cp_cmd |= RxVlan;
+
+ rtl_init_mdio_ops(tp);
+ rtl_init_pll_power_ops(tp);
+ rtl_init_jumbo_ops(tp);
+
+ rtl8169_print_mac_version(tp);
+
+ chipset = tp->mac_version;
+ tp->txd_version = rtl_chip_infos[chipset].txd_version;
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
+ RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
+ if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
+ tp->features |= RTL_FEATURE_WOL;
+ if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
+ tp->features |= RTL_FEATURE_WOL;
+ tp->features |= rtl_try_msi(tp, cfg);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
+ if (rtl_tbi_enabled(tp)) {
+ tp->set_speed = rtl8169_set_speed_tbi;
+ tp->get_settings = rtl8169_gset_tbi;
+ tp->phy_reset_enable = rtl8169_tbi_reset_enable;
+ tp->phy_reset_pending = rtl8169_tbi_reset_pending;
+ tp->link_ok = rtl8169_tbi_link_ok;
+ tp->do_ioctl = rtl_tbi_ioctl;
+ } else {
+ tp->set_speed = rtl8169_set_speed_xmii;
+ tp->get_settings = rtl8169_gset_xmii;
+ tp->phy_reset_enable = rtl8169_xmii_reset_enable;
+ tp->phy_reset_pending = rtl8169_xmii_reset_pending;
+ tp->link_ok = rtl8169_xmii_link_ok;
+ tp->do_ioctl = rtl_xmii_ioctl;
+ }
+
+ mutex_init(&tp->wk.mutex);
+
+ /* Get MAC address */
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = RTL_R8(MAC0 + i);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
+ dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
+
+ netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
+
+ /* don't enable SG, IP_CSUM and TSO by default - they might not work
+ * properly for all devices */
+ dev->features |= NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HIGHDMA;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ /* 8110SCd requires hardware Rx VLAN - disallow toggling */
+ dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
+
+ dev->hw_features |= NETIF_F_RXALL;
+ dev->hw_features |= NETIF_F_RXFCS;
+
+ tp->hw_start = cfg->hw_start;
+ tp->event_slow = cfg->event_slow;
+
+ tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
+ ~(RxBOVF | RxFOVF) : ~0;
+
+ init_timer(&tp->timer);
+ tp->timer.data = (unsigned long) dev;
+ tp->timer.function = rtl8169_phy_timer;
+
+ tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
+
+ rc = register_netdev(dev);
+ if (rc < 0)
+ goto err_out_msi_4;
+
+ pci_set_drvdata(pdev, dev);
+
+ netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
+ rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
+ (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
+ if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
+ netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
+ "tx checksumming: %s]\n",
+ rtl_chip_infos[chipset].jumbo_max,
+ rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
+ }
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) {
+ rtl8168_driver_start(tp);
+ }
+
+ device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_noidle(&pdev->dev);
+
+ netif_carrier_off(dev);
+
+out:
+ return rc;
+
+err_out_msi_4:
+ rtl_disable_msi(pdev, tp);
+ iounmap(ioaddr);
+err_out_free_res_3:
+ pci_release_regions(pdev);
+err_out_mwi_2:
+ pci_clear_mwi(pdev);
+ pci_disable_device(pdev);
+err_out_free_dev_1:
+ free_netdev(dev);
+ goto out;
+}
+
static struct pci_driver rtl8169_pci_driver = {
.name = MODULENAME,
.id_table = rtl8169_pci_tbl,
- .probe = rtl8169_init_one,
- .remove = __devexit_p(rtl8169_remove_one),
+ .probe = rtl_init_one,
+ .remove = __devexit_p(rtl_remove_one),
.shutdown = rtl_shutdown,
.driver.pm = RTL8169_PM_OPS,
};
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 87b650131774..8615961c1287 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -34,7 +34,6 @@
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
-#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
@@ -269,6 +268,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
.rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
+ .tsu = 1,
};
static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
@@ -654,13 +654,12 @@ static void sh_eth_ring_format(struct net_device *ndev)
for (i = 0; i < RX_RING_SIZE; i++) {
/* skb */
mdp->rx_skbuff[i] = NULL;
- skb = dev_alloc_skb(mdp->rx_buf_sz);
+ skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
mdp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
- dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
+ dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
DMA_FROM_DEVICE);
- skb->dev = ndev; /* Mark as being used by this device. */
sh_eth_set_receive_align(skb);
/* RX descriptor */
@@ -883,8 +882,8 @@ static int sh_eth_txfree(struct net_device *ndev)
if (entry >= TX_RING_SIZE - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
- mdp->stats.tx_packets++;
- mdp->stats.tx_bytes += txdesc->buffer_length;
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += txdesc->buffer_length;
}
return freeNum;
}
@@ -910,23 +909,23 @@ static int sh_eth_rx(struct net_device *ndev)
break;
if (!(desc_status & RDFEND))
- mdp->stats.rx_length_errors++;
+ ndev->stats.rx_length_errors++;
if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
RD_RFS5 | RD_RFS6 | RD_RFS10)) {
- mdp->stats.rx_errors++;
+ ndev->stats.rx_errors++;
if (desc_status & RD_RFS1)
- mdp->stats.rx_crc_errors++;
+ ndev->stats.rx_crc_errors++;
if (desc_status & RD_RFS2)
- mdp->stats.rx_frame_errors++;
+ ndev->stats.rx_frame_errors++;
if (desc_status & RD_RFS3)
- mdp->stats.rx_length_errors++;
+ ndev->stats.rx_length_errors++;
if (desc_status & RD_RFS4)
- mdp->stats.rx_length_errors++;
+ ndev->stats.rx_length_errors++;
if (desc_status & RD_RFS6)
- mdp->stats.rx_missed_errors++;
+ ndev->stats.rx_missed_errors++;
if (desc_status & RD_RFS10)
- mdp->stats.rx_over_errors++;
+ ndev->stats.rx_over_errors++;
} else {
if (!mdp->cd->hw_swap)
sh_eth_soft_swap(
@@ -939,8 +938,8 @@ static int sh_eth_rx(struct net_device *ndev)
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
netif_rx(skb);
- mdp->stats.rx_packets++;
- mdp->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += pkt_len;
}
rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
entry = (++mdp->cur_rx) % RX_RING_SIZE;
@@ -955,13 +954,12 @@ static int sh_eth_rx(struct net_device *ndev)
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
if (mdp->rx_skbuff[entry] == NULL) {
- skb = dev_alloc_skb(mdp->rx_buf_sz);
+ skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
mdp->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
- dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
+ dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
DMA_FROM_DEVICE);
- skb->dev = ndev;
sh_eth_set_receive_align(skb);
skb_checksum_none_assert(skb);
@@ -1009,7 +1007,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
felic_stat = sh_eth_read(ndev, ECSR);
sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
if (felic_stat & ECSR_ICD)
- mdp->stats.tx_carrier_errors++;
+ ndev->stats.tx_carrier_errors++;
if (felic_stat & ECSR_LCHNG) {
/* Link Changed */
if (mdp->cd->no_psr || mdp->no_ether_link) {
@@ -1042,7 +1040,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (intr_status & EESR_TWB) {
/* Write buck end. unused write back interrupt */
if (intr_status & EESR_TABT) /* Transmit Abort int */
- mdp->stats.tx_aborted_errors++;
+ ndev->stats.tx_aborted_errors++;
if (netif_msg_tx_err(mdp))
dev_err(&ndev->dev, "Transmit Abort\n");
}
@@ -1051,7 +1049,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
/* Receive Abort int */
if (intr_status & EESR_RFRMER) {
/* Receive Frame Overflow int */
- mdp->stats.rx_frame_errors++;
+ ndev->stats.rx_frame_errors++;
if (netif_msg_rx_err(mdp))
dev_err(&ndev->dev, "Receive Abort\n");
}
@@ -1059,21 +1057,21 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (intr_status & EESR_TDE) {
/* Transmit Descriptor Empty int */
- mdp->stats.tx_fifo_errors++;
+ ndev->stats.tx_fifo_errors++;
if (netif_msg_tx_err(mdp))
dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
}
if (intr_status & EESR_TFE) {
/* FIFO under flow */
- mdp->stats.tx_fifo_errors++;
+ ndev->stats.tx_fifo_errors++;
if (netif_msg_tx_err(mdp))
dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
}
if (intr_status & EESR_RDE) {
/* Receive Descriptor Empty int */
- mdp->stats.rx_over_errors++;
+ ndev->stats.rx_over_errors++;
if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
sh_eth_write(ndev, EDRRR_R, EDRRR);
@@ -1083,14 +1081,14 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (intr_status & EESR_RFE) {
/* Receive FIFO Overflow int */
- mdp->stats.rx_fifo_errors++;
+ ndev->stats.rx_fifo_errors++;
if (netif_msg_rx_err(mdp))
dev_err(&ndev->dev, "Receive FIFO Overflow\n");
}
if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
/* Address Error */
- mdp->stats.tx_fifo_errors++;
+ ndev->stats.tx_fifo_errors++;
if (netif_msg_tx_err(mdp))
dev_err(&ndev->dev, "Address Error\n");
}
@@ -1447,7 +1445,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
" resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
/* tx_errors count up */
- mdp->stats.tx_errors++;
+ ndev->stats.tx_errors++;
/* timer off */
del_timer_sync(&mdp->timer);
@@ -1569,27 +1567,27 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
pm_runtime_get_sync(&mdp->pdev->dev);
- mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR);
+ ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
sh_eth_write(ndev, 0, TROCR); /* (write clear) */
- mdp->stats.collisions += sh_eth_read(ndev, CDCR);
+ ndev->stats.collisions += sh_eth_read(ndev, CDCR);
sh_eth_write(ndev, 0, CDCR); /* (write clear) */
- mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
+ ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
sh_eth_write(ndev, 0, LCCR); /* (write clear) */
if (sh_eth_is_gether(mdp)) {
- mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
+ ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
sh_eth_write(ndev, 0, CERCR); /* (write clear) */
- mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
+ ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
sh_eth_write(ndev, 0, CEECR); /* (write clear) */
} else {
- mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
+ ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
}
pm_runtime_put_sync(&mdp->pdev->dev);
- return &mdp->stats;
+ return &ndev->stats;
}
-/* ioctl to device funciotn*/
+/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
int cmd)
{
@@ -1606,18 +1604,345 @@ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
}
#if defined(SH_ETH_HAS_TSU)
+/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
+static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
+ int entry)
+{
+ return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
+}
+
+static u32 sh_eth_tsu_get_post_mask(int entry)
+{
+ return 0x0f << (28 - ((entry % 8) * 4));
+}
+
+static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
+{
+ return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
+}
+
+static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
+ int entry)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u32 tmp;
+ void *reg_offset;
+
+ reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
+ tmp = ioread32(reg_offset);
+ iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
+}
+
+static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
+ int entry)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u32 post_mask, ref_mask, tmp;
+ void *reg_offset;
+
+ reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
+ post_mask = sh_eth_tsu_get_post_mask(entry);
+ ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
+
+ tmp = ioread32(reg_offset);
+ iowrite32(tmp & ~post_mask, reg_offset);
+
+ /* If the other port still enables the entry, the function returns "true" */
+ return tmp & ref_mask;
+}
+
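For reference, a small stand-alone program reproducing the TSU_POSTn layout math used above: each 32-bit POST register covers 8 CAM entries, 4 bits per entry with entry 0 in the top nibble, and within a nibble port 0 and port 1 own different bits. Only the arithmetic from the hunk is reproduced; the register names and their semantics are whatever the driver defines.

#include <stdint.h>
#include <stdio.h>

/* Which POST register an entry lives in (driver: TSU_POST1 + entry / 8 * 4). */
static unsigned int post_reg(int entry)
{
	return entry / 8;
}

/* The entry's whole 4-bit field, mirroring sh_eth_tsu_get_post_mask(). */
static uint32_t post_mask(int entry)
{
	return 0x0fu << (28 - (entry % 8) * 4);
}

/* This port's bit inside that field, mirroring sh_eth_tsu_get_post_bit(). */
static uint32_t post_bit(int entry, int port)
{
	return (0x08u >> (port << 1)) << (28 - (entry % 8) * 4);
}

int main(void)
{
	for (int entry = 0; entry < 10; entry++)
		printf("entry %2d -> POST%u  mask %08x  port0 %08x  port1 %08x\n",
		       entry, post_reg(entry) + 1,
		       (unsigned int)post_mask(entry),
		       (unsigned int)post_bit(entry, 0),
		       (unsigned int)post_bit(entry, 1));
	return 0;
}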
+static int sh_eth_tsu_busy(struct net_device *ndev)
+{
+ int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
+ udelay(10);
+ timeout--;
+ if (timeout <= 0) {
+ dev_err(&ndev->dev, "%s: timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
+ const u8 *addr)
+{
+ u32 val;
+
+ val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
+ iowrite32(val, reg);
+ if (sh_eth_tsu_busy(ndev) < 0)
+ return -EBUSY;
+
+ val = addr[4] << 8 | addr[5];
+ iowrite32(val, reg + 4);
+ if (sh_eth_tsu_busy(ndev) < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
+{
+ u32 val;
+
+ val = ioread32(reg);
+ addr[0] = (val >> 24) & 0xff;
+ addr[1] = (val >> 16) & 0xff;
+ addr[2] = (val >> 8) & 0xff;
+ addr[3] = val & 0xff;
+ val = ioread32(reg + 4);
+ addr[4] = (val >> 8) & 0xff;
+ addr[5] = val & 0xff;
+}
+
+static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+ int i;
+ u8 c_addr[ETH_ALEN];
+
+ for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
+ sh_eth_tsu_read_entry(reg_offset, c_addr);
+ if (memcmp(addr, c_addr, ETH_ALEN) == 0)
+ return i;
+ }
+
+ return -ENOENT;
+}
+
+static int sh_eth_tsu_find_empty(struct net_device *ndev)
+{
+ u8 blank[ETH_ALEN];
+ int entry;
+
+ memset(blank, 0, sizeof(blank));
+ entry = sh_eth_tsu_find_entry(ndev, blank);
+ return (entry < 0) ? -ENOMEM : entry;
+}
+
+static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
+ int entry)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+ int ret;
+ u8 blank[ETH_ALEN];
+
+ sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
+ ~(1 << (31 - entry)), TSU_TEN);
+
+ memset(blank, 0, sizeof(blank));
+ ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+ int i, ret;
+
+ if (!mdp->cd->tsu)
+ return 0;
+
+ i = sh_eth_tsu_find_entry(ndev, addr);
+ if (i < 0) {
+ /* No entry found, create one */
+ i = sh_eth_tsu_find_empty(ndev);
+ if (i < 0)
+ return -ENOMEM;
+ ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
+ if (ret < 0)
+ return ret;
+
+ /* Enable the entry */
+ sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
+ (1 << (31 - i)), TSU_TEN);
+ }
+
+ /* Entry found or created, enable POST */
+ sh_eth_tsu_enable_cam_entry_post(ndev, i);
+
+ return 0;
+}
+
+static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int i, ret;
+
+ if (!mdp->cd->tsu)
+ return 0;
+
+ i = sh_eth_tsu_find_entry(ndev, addr);
+ if (i) {
+ /* Entry found */
+ if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
+ goto done;
+
+ /* Disable the entry if both ports were disabled */
+ ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
+ if (ret < 0)
+ return ret;
+ }
+done:
+ return 0;
+}
+
+static int sh_eth_tsu_purge_all(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int i, ret;
+
+ if (unlikely(!mdp->cd->tsu))
+ return 0;
+
+ for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
+ if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
+ continue;
+
+ /* Disable the entry if both ports were disabled */
+ ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u8 addr[ETH_ALEN];
+ void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+ int i;
+
+ if (unlikely(!mdp->cd->tsu))
+ return;
+
+ for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
+ sh_eth_tsu_read_entry(reg_offset, addr);
+ if (is_multicast_ether_addr(addr))
+ sh_eth_tsu_del_entry(ndev, addr);
+ }
+}
+
/* Multicast reception directions set */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u32 ecmr_bits;
+ int mcast_all = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp->lock, flags);
+ /*
+ * Initial condition is MCT = 1, PRM = 0.
+ * Depending on ndev->flags, set PRM or clear MCT
+ */
+ ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
+
+ if (!(ndev->flags & IFF_MULTICAST)) {
+ sh_eth_tsu_purge_mcast(ndev);
+ mcast_all = 1;
+ }
+ if (ndev->flags & IFF_ALLMULTI) {
+ sh_eth_tsu_purge_mcast(ndev);
+ ecmr_bits &= ~ECMR_MCT;
+ mcast_all = 1;
+ }
+
if (ndev->flags & IFF_PROMISC) {
- /* Set promiscuous. */
- sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) |
- ECMR_PRM, ECMR);
+ sh_eth_tsu_purge_all(ndev);
+ ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
+ } else if (mdp->cd->tsu) {
+ struct netdev_hw_addr *ha;
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (mcast_all && is_multicast_ether_addr(ha->addr))
+ continue;
+
+ if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
+ if (!mcast_all) {
+ sh_eth_tsu_purge_mcast(ndev);
+ ecmr_bits &= ~ECMR_MCT;
+ mcast_all = 1;
+ }
+ }
+ }
} else {
/* Normal, unicast/broadcast-only mode. */
- sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) |
- ECMR_MCT, ECMR);
+ ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
}
+
+ /* update the ethernet mode */
+ sh_eth_write(ndev, ecmr_bits, ECMR);
+
+ spin_unlock_irqrestore(&mdp->lock, flags);
+}
+
+static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
+{
+ if (!mdp->port)
+ return TSU_VTAG0;
+ else
+ return TSU_VTAG1;
+}
+
+static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int vtag_reg_index = sh_eth_get_vtag_index(mdp);
+
+ if (unlikely(!mdp->cd->tsu))
+ return -EPERM;
+
+ /* No filtering if vid = 0 */
+ if (!vid)
+ return 0;
+
+ mdp->vlan_num_ids++;
+
+ /*
+ * The controller has one VLAN tag HW filter. So, if the filter is
+ * already enabled, the driver disables it and lets all VLAN tags through.
+ */
+ if (mdp->vlan_num_ids > 1) {
+ /* disable VLAN filter */
+ sh_eth_tsu_write(mdp, 0, vtag_reg_index);
+ return 0;
+ }
+
+ sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
+ vtag_reg_index);
+
+ return 0;
+}
+
+static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int vtag_reg_index = sh_eth_get_vtag_index(mdp);
+
+ if (unlikely(!mdp->cd->tsu))
+ return -EPERM;
+
+ /* No filtering if vid = 0 */
+ if (!vid)
+ return 0;
+
+ mdp->vlan_num_ids--;
+ sh_eth_tsu_write(mdp, 0, vtag_reg_index);
+
+ return 0;
}
#endif /* SH_ETH_HAS_TSU */
@@ -1768,6 +2093,8 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_get_stats = sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
.ndo_set_rx_mode = sh_eth_set_multicast_list,
+ .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
#endif
.ndo_tx_timeout = sh_eth_tx_timeout,
.ndo_do_ioctl = sh_eth_do_ioctl,
@@ -1794,7 +2121,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ndev = alloc_etherdev(sizeof(struct sh_eth_private));
if (!ndev) {
- dev_err(&pdev->dev, "Could not allocate device.\n");
ret = -ENOMEM;
goto out;
}
@@ -1862,18 +2188,22 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* read and set MAC address */
read_mac_address(ndev, pd->mac_addr);
- /* First device only init */
- if (!devno) {
- if (mdp->cd->tsu) {
- struct resource *rtsu;
- rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!rtsu) {
- dev_err(&pdev->dev, "Not found TSU resource\n");
- goto out_release;
- }
- mdp->tsu_addr = ioremap(rtsu->start,
- resource_size(rtsu));
+ /* ioremap the TSU registers */
+ if (mdp->cd->tsu) {
+ struct resource *rtsu;
+ rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!rtsu) {
+ dev_err(&pdev->dev, "Not found TSU resource\n");
+ goto out_release;
}
+ mdp->tsu_addr = ioremap(rtsu->start,
+ resource_size(rtsu));
+ mdp->port = devno % 2;
+ ndev->features = NETIF_F_HW_VLAN_FILTER;
+ }
+
+ /* initialize first or needed device */
+ if (!devno || pd->needs_init) {
if (mdp->cd->chip_reset)
mdp->cd->chip_reset(ndev);
@@ -1922,7 +2252,8 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct sh_eth_private *mdp = netdev_priv(ndev);
- iounmap(mdp->tsu_addr);
+ if (mdp->cd->tsu)
+ iounmap(mdp->tsu_addr);
sh_mdio_release(ndev);
unregister_netdev(ndev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index cdbd844662a7..57dc26261116 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -29,6 +29,8 @@
#define RX_RING_SIZE 64 /* Rx ring size */
#define ETHERSMALL 60
#define PKT_BUF_SZ 1538
+#define SH_ETH_TSU_TIMEOUT_MS 500
+#define SH_ETH_TSU_CAM_ENTRIES 32
enum {
/* E-DMAC registers */
@@ -677,6 +679,10 @@ enum TSU_FWSLC_BIT {
TSU_FWSLC_CAMSEL11 = 0x0002, TSU_FWSLC_CAMSEL10 = 0x0001,
};
+/* TSU_VTAGn */
+#define TSU_VTAG_ENABLE 0x80000000
+#define TSU_VTAG_VID_MASK 0x00000fff
+
/*
* The sh ether Tx buffer descriptors.
* This structure should be 20 bytes.
@@ -759,7 +765,6 @@ struct sh_eth_private {
struct sh_eth_txdesc *tx_ring;
struct sk_buff **rx_skbuff;
struct sk_buff **tx_skbuff;
- struct net_device_stats stats;
struct timer_list timer;
spinlock_t lock;
u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
@@ -779,6 +784,8 @@ struct sh_eth_private {
char post_rx; /* POST receive */
char post_fw; /* POST forward */
struct net_device_stats tsu_stats; /* TSU forward status */
+ int port; /* for TSU */
+ int vlan_num_ids; /* for VLAN tag filter */
unsigned no_ether_link:1;
unsigned ether_link_active_low:1;
@@ -812,6 +819,12 @@ static inline unsigned long sh_eth_read(struct net_device *ndev,
return ioread32(mdp->addr + mdp->reg_offset[enum_index]);
}
+static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
+ int enum_index)
+{
+ return mdp->tsu_addr + mdp->reg_offset[enum_index];
+}
+
static inline void sh_eth_tsu_write(struct sh_eth_private *mdp,
unsigned long data, int enum_index)
{
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 22e9c0181ce8..1895605abb35 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -370,12 +370,13 @@ struct s6gmac {
} link;
};
-static void s6gmac_rx_fillfifo(struct s6gmac *pd)
+static void s6gmac_rx_fillfifo(struct net_device *dev)
{
+ struct s6gmac *pd = netdev_priv(dev);
struct sk_buff *skb;
while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB) &&
(!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan)) &&
- (skb = dev_alloc_skb(S6_MAX_FRLEN + 2))) {
+ (skb = netdev_alloc_skb(dev, S6_MAX_FRLEN + 2))) {
pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb;
s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan,
pd->io, (u32)skb->data, S6_MAX_FRLEN);
@@ -514,7 +515,7 @@ static irqreturn_t s6gmac_interrupt(int irq, void *dev_id)
spin_lock(&pd->lock);
if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan))
s6gmac_rx_interrupt(dev);
- s6gmac_rx_fillfifo(pd);
+ s6gmac_rx_fillfifo(dev);
if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan))
s6gmac_tx_interrupt(dev);
s6gmac_stats_interrupt(pd, 0);
@@ -894,7 +895,7 @@ static int s6gmac_open(struct net_device *dev)
s6gmac_init_device(dev);
s6gmac_init_stats(dev);
s6gmac_init_dmac(dev);
- s6gmac_rx_fillfifo(pd);
+ s6gmac_rx_fillfifo(dev);
s6dmac_enable_chan(pd->rx_dma, pd->rx_chan,
2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1);
s6dmac_enable_chan(pd->tx_dma, pd->tx_chan,
@@ -960,11 +961,11 @@ static int __devinit s6gmac_probe(struct platform_device *pdev)
int res;
unsigned long i;
struct mii_bus *mb;
+
dev = alloc_etherdev(sizeof(*pd));
- if (!dev) {
- printk(KERN_ERR DRV_PRMT "etherdev alloc failed, aborting.\n");
+ if (!dev)
return -ENOMEM;
- }
+
dev->open = s6gmac_open;
dev->stop = s6gmac_stop;
dev->hard_start_xmit = s6gmac_tx;
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 893c880dadf0..7b819bd8c416 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -643,7 +643,7 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
if (next_ptr <= this_ptr)
length += RX_END - RX_START;
- skb = dev_alloc_skb(length + 2);
+ skb = netdev_alloc_skb(dev, length + 2);
if (skb) {
unsigned char *buf;
diff --git a/drivers/net/ethernet/seeq/seeq8005.c b/drivers/net/ethernet/seeq/seeq8005.c
index 60561451789b..798990774446 100644
--- a/drivers/net/ethernet/seeq/seeq8005.c
+++ b/drivers/net/ethernet/seeq/seeq8005.c
@@ -548,7 +548,7 @@ static void seeq8005_rx(struct net_device *dev)
struct sk_buff *skb;
unsigned char *buf;
- skb = dev_alloc_skb(pkt_len);
+ skb = netdev_alloc_skb(dev, pkt_len);
if (skb == NULL) {
printk("%s: Memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index f955a19eb22f..bb8c8222122b 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -733,7 +733,6 @@ static int __devinit sgiseeq_probe(struct platform_device *pdev)
dev = alloc_etherdev(sizeof (struct sgiseeq_private));
if (!dev) {
- printk(KERN_ERR "Sgiseeq: Etherdev alloc failed, aborting.\n");
err = -ENOMEM;
goto err_out;
}
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 5d18841f0f3d..fb3cbc27063c 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -16,6 +16,21 @@ config SFC_MTD
depends on SFC && MTD && !(SFC=y && MTD=m)
default y
---help---
- This exposes the on-board flash memory as MTD devices (e.g.
- /dev/mtd1). This makes it possible to upload new firmware
- to the NIC.
+ This exposes the on-board flash and/or EEPROM as MTD devices
+ (e.g. /dev/mtd1). This is required to update the firmware or
+ the boot configuration under Linux.
+config SFC_MCDI_MON
+ bool "Solarflare SFC9000-family hwmon support"
+ depends on SFC && HWMON && !(SFC=y && HWMON=m)
+ default y
+ ---help---
+ This exposes the on-board firmware-managed sensors as a
+ hardware monitor device.
+config SFC_SRIOV
+ bool "Solarflare SFC9000-family SR-IOV support"
+ depends on SFC && PCI_IOV
+ default y
+ ---help---
+ This enables support for the SFC9000 I/O Virtualization
+ features, allowing accelerated network performance in
+ virtualized environments.
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index ab31c7124db1..ea1f8db57318 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -2,7 +2,8 @@ sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
falcon_xmac.o mcdi_mac.o \
selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
tenxpress.o txc43128_phy.o falcon_boards.o \
- mcdi.o mcdi_phy.o
+ mcdi.o mcdi_phy.o mcdi_mon.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
+sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index 098ac2ad757d..b26a954c27fc 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -448,40 +448,40 @@ typedef union efx_oword {
EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low))
#define EFX_SET_OWORD64(oword, low, high, value) do { \
- (oword).u64[0] = (((oword).u64[0] \
+ (oword).u64[0] = (((oword).u64[0] \
& ~EFX_INPLACE_MASK64(0, 63, low, high)) \
| EFX_INSERT64(0, 63, low, high, value)); \
- (oword).u64[1] = (((oword).u64[1] \
+ (oword).u64[1] = (((oword).u64[1] \
& ~EFX_INPLACE_MASK64(64, 127, low, high)) \
| EFX_INSERT64(64, 127, low, high, value)); \
} while (0)
#define EFX_SET_QWORD64(qword, low, high, value) do { \
- (qword).u64[0] = (((qword).u64[0] \
+ (qword).u64[0] = (((qword).u64[0] \
& ~EFX_INPLACE_MASK64(0, 63, low, high)) \
| EFX_INSERT64(0, 63, low, high, value)); \
} while (0)
#define EFX_SET_OWORD32(oword, low, high, value) do { \
- (oword).u32[0] = (((oword).u32[0] \
+ (oword).u32[0] = (((oword).u32[0] \
& ~EFX_INPLACE_MASK32(0, 31, low, high)) \
| EFX_INSERT32(0, 31, low, high, value)); \
- (oword).u32[1] = (((oword).u32[1] \
+ (oword).u32[1] = (((oword).u32[1] \
& ~EFX_INPLACE_MASK32(32, 63, low, high)) \
| EFX_INSERT32(32, 63, low, high, value)); \
- (oword).u32[2] = (((oword).u32[2] \
+ (oword).u32[2] = (((oword).u32[2] \
& ~EFX_INPLACE_MASK32(64, 95, low, high)) \
| EFX_INSERT32(64, 95, low, high, value)); \
- (oword).u32[3] = (((oword).u32[3] \
+ (oword).u32[3] = (((oword).u32[3] \
& ~EFX_INPLACE_MASK32(96, 127, low, high)) \
| EFX_INSERT32(96, 127, low, high, value)); \
} while (0)
#define EFX_SET_QWORD32(qword, low, high, value) do { \
- (qword).u32[0] = (((qword).u32[0] \
+ (qword).u32[0] = (((qword).u32[0] \
& ~EFX_INPLACE_MASK32(0, 31, low, high)) \
| EFX_INSERT32(0, 31, low, high, value)); \
- (qword).u32[1] = (((qword).u32[1] \
+ (qword).u32[1] = (((qword).u32[1] \
& ~EFX_INPLACE_MASK32(32, 63, low, high)) \
| EFX_INSERT32(32, 63, low, high, value)); \
} while (0)
@@ -531,8 +531,8 @@ typedef union efx_oword {
/* Static initialiser */
-#define EFX_OWORD32(a, b, c, d) \
- { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \
+#define EFX_OWORD32(a, b, c, d) \
+ { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \
cpu_to_le32(c), cpu_to_le32(d) } }
#endif /* EFX_BITFIELD_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index e43702f33b62..3cbfbffe3f00 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -25,6 +25,7 @@
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
+#include "selftest.h"
#include "mcdi.h"
#include "workarounds.h"
@@ -38,15 +39,15 @@
/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
-const char *efx_loopback_mode_names[] = {
+const char *const efx_loopback_mode_names[] = {
[LOOPBACK_NONE] = "NONE",
[LOOPBACK_DATA] = "DATAPATH",
[LOOPBACK_GMAC] = "GMAC",
[LOOPBACK_XGMII] = "XGMII",
[LOOPBACK_XGXS] = "XGXS",
- [LOOPBACK_XAUI] = "XAUI",
- [LOOPBACK_GMII] = "GMII",
- [LOOPBACK_SGMII] = "SGMII",
+ [LOOPBACK_XAUI] = "XAUI",
+ [LOOPBACK_GMII] = "GMII",
+ [LOOPBACK_SGMII] = "SGMII",
[LOOPBACK_XGBR] = "XGBR",
[LOOPBACK_XFI] = "XFI",
[LOOPBACK_XAUI_FAR] = "XAUI_FAR",
@@ -55,21 +56,21 @@ const char *efx_loopback_mode_names[] = {
[LOOPBACK_XFI_FAR] = "XFI_FAR",
[LOOPBACK_GPHY] = "GPHY",
[LOOPBACK_PHYXS] = "PHYXS",
- [LOOPBACK_PCS] = "PCS",
- [LOOPBACK_PMAPMD] = "PMA/PMD",
+ [LOOPBACK_PCS] = "PCS",
+ [LOOPBACK_PMAPMD] = "PMA/PMD",
[LOOPBACK_XPORT] = "XPORT",
[LOOPBACK_XGMII_WS] = "XGMII_WS",
- [LOOPBACK_XAUI_WS] = "XAUI_WS",
+ [LOOPBACK_XAUI_WS] = "XAUI_WS",
[LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
- [LOOPBACK_GMII_WS] = "GMII_WS",
+ [LOOPBACK_GMII_WS] = "GMII_WS",
[LOOPBACK_XFI_WS] = "XFI_WS",
[LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
- [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
+ [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
};
const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
-const char *efx_reset_type_names[] = {
+const char *const efx_reset_type_names[] = {
[RESET_TYPE_INVISIBLE] = "INVISIBLE",
[RESET_TYPE_ALL] = "ALL",
[RESET_TYPE_WORLD] = "WORLD",
@@ -122,15 +123,6 @@ static int napi_weight = 64;
*/
static unsigned int efx_monitor_interval = 1 * HZ;
-/* This controls whether or not the driver will initialise devices
- * with invalid MAC addresses stored in the EEPROM or flash. If true,
- * such devices will be initialised with a random locally-generated
- * MAC address. This allows for loading the sfc_mtd driver to
- * reprogram the flash, even if the flash contents (including the MAC
- * address) have previously been erased.
- */
-static unsigned int allow_bad_hwaddr;
-
/* Initial interrupt moderation settings. They can be modified after
* module load with ethtool.
*
@@ -162,7 +154,7 @@ static unsigned int interrupt_mode;
* interrupt handling.
*
* Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
- * The default (0) means to assign an interrupt to each package (level II cache)
+ * The default (0) means to assign an interrupt to each core.
*/
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
@@ -172,12 +164,12 @@ static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
-static unsigned irq_adapt_low_thresh = 10000;
+static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
"Threshold score for reducing IRQ moderation");
-static unsigned irq_adapt_high_thresh = 20000;
+static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
"Threshold score for increasing IRQ moderation");
@@ -195,9 +187,13 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
*
*************************************************************************/
+static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
+static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
+static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
+static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
-static void efx_init_napi(struct efx_nic *efx);
+static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
@@ -226,27 +222,27 @@ static void efx_stop_all(struct efx_nic *efx);
*/
static int efx_process_channel(struct efx_channel *channel, int budget)
{
- struct efx_nic *efx = channel->efx;
int spent;
- if (unlikely(efx->reset_pending || !channel->enabled))
+ if (unlikely(!channel->enabled))
return 0;
spent = efx_nic_process_eventq(channel, budget);
- if (spent == 0)
- return 0;
-
- /* Deliver last RX packet. */
- if (channel->rx_pkt) {
- __efx_rx_packet(channel, channel->rx_pkt,
- channel->rx_pkt_csummed);
- channel->rx_pkt = NULL;
+ if (spent && efx_channel_has_rx_queue(channel)) {
+ struct efx_rx_queue *rx_queue =
+ efx_channel_get_rx_queue(channel);
+
+ /* Deliver last RX packet. */
+ if (channel->rx_pkt) {
+ __efx_rx_packet(channel, channel->rx_pkt);
+ channel->rx_pkt = NULL;
+ }
+ if (rx_queue->enabled) {
+ efx_rx_strategy(channel);
+ efx_fast_push_rx_descriptors(rx_queue);
+ }
}
- efx_rx_strategy(channel);
-
- efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
-
return spent;
}
@@ -286,7 +282,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
spent = efx_process_channel(channel, budget);
if (spent < budget) {
- if (channel->channel < efx->n_rx_channels &&
+ if (efx_channel_has_rx_queue(channel) &&
efx->irq_rx_adaptive &&
unlikely(++channel->irq_count == 1000)) {
if (unlikely(channel->irq_mod_score <
@@ -373,7 +369,7 @@ static int efx_probe_eventq(struct efx_channel *channel)
struct efx_nic *efx = channel->efx;
unsigned long entries;
- netif_dbg(channel->efx, probe, channel->efx->net_dev,
+ netif_dbg(efx, probe, efx->net_dev,
"chan %d create event queue\n", channel->channel);
/* Build an event queue with room for one event per tx and rx buffer,
@@ -396,6 +392,34 @@ static void efx_init_eventq(struct efx_channel *channel)
efx_nic_init_eventq(channel);
}
+/* Enable event queue processing and NAPI */
+static void efx_start_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, ifup, channel->efx->net_dev,
+ "chan %d start event queue\n", channel->channel);
+
+ /* The interrupt handler for this channel may set work_pending
+ * as soon as we enable it. Make sure it's cleared before
+ * then. Similarly, make sure it sees the enabled flag set.
+ */
+ channel->work_pending = false;
+ channel->enabled = true;
+ smp_wmb();
+
+ napi_enable(&channel->napi_str);
+ efx_nic_eventq_read_ack(channel);
+}
+
+/* Disable event queue processing and NAPI */
+static void efx_stop_eventq(struct efx_channel *channel)
+{
+ if (!channel->enabled)
+ return;
+
+ napi_disable(&channel->napi_str);
+ channel->enabled = false;
+}
+
static void efx_fini_eventq(struct efx_channel *channel)
{
netif_dbg(channel->efx, drv, channel->efx->net_dev,
@@ -418,8 +442,7 @@ static void efx_remove_eventq(struct efx_channel *channel)
*
*************************************************************************/
-/* Allocate and initialise a channel structure, optionally copying
- * parameters (but not resources) from an old channel structure. */
+/* Allocate and initialise a channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
@@ -428,45 +451,60 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
struct efx_tx_queue *tx_queue;
int j;
- if (old_channel) {
- channel = kmalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel)
- return NULL;
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
- *channel = *old_channel;
+ channel->efx = efx;
+ channel->channel = i;
+ channel->type = &efx_default_channel_type;
- channel->napi_dev = NULL;
- memset(&channel->eventq, 0, sizeof(channel->eventq));
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ tx_queue->efx = efx;
+ tx_queue->queue = i * EFX_TXQ_TYPES + j;
+ tx_queue->channel = channel;
+ }
- rx_queue = &channel->rx_queue;
- rx_queue->buffer = NULL;
- memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
+ rx_queue = &channel->rx_queue;
+ rx_queue->efx = efx;
+ setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+ (unsigned long)rx_queue);
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
- tx_queue = &channel->tx_queue[j];
- if (tx_queue->channel)
- tx_queue->channel = channel;
- tx_queue->buffer = NULL;
- memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
- }
- } else {
- channel = kzalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel)
- return NULL;
+ return channel;
+}
- channel->efx = efx;
- channel->channel = i;
+/* Allocate and initialise a channel structure, copying parameters
+ * (but not resources) from an old channel structure.
+ */
+static struct efx_channel *
+efx_copy_channel(const struct efx_channel *old_channel)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ int j;
+
+ channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
- tx_queue = &channel->tx_queue[j];
- tx_queue->efx = efx;
- tx_queue->queue = i * EFX_TXQ_TYPES + j;
+ *channel = *old_channel;
+
+ channel->napi_dev = NULL;
+ memset(&channel->eventq, 0, sizeof(channel->eventq));
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ if (tx_queue->channel)
tx_queue->channel = channel;
- }
+ tx_queue->buffer = NULL;
+ memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
}
rx_queue = &channel->rx_queue;
- rx_queue->efx = efx;
+ rx_queue->buffer = NULL;
+ memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
(unsigned long)rx_queue);
@@ -482,57 +520,62 @@ static int efx_probe_channel(struct efx_channel *channel)
netif_dbg(channel->efx, probe, channel->efx->net_dev,
"creating channel %d\n", channel->channel);
+ rc = channel->type->pre_probe(channel);
+ if (rc)
+ goto fail;
+
rc = efx_probe_eventq(channel);
if (rc)
- goto fail1;
+ goto fail;
efx_for_each_channel_tx_queue(tx_queue, channel) {
rc = efx_probe_tx_queue(tx_queue);
if (rc)
- goto fail2;
+ goto fail;
}
efx_for_each_channel_rx_queue(rx_queue, channel) {
rc = efx_probe_rx_queue(rx_queue);
if (rc)
- goto fail3;
+ goto fail;
}
channel->n_rx_frm_trunc = 0;
return 0;
- fail3:
- efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_remove_rx_queue(rx_queue);
- fail2:
- efx_for_each_channel_tx_queue(tx_queue, channel)
- efx_remove_tx_queue(tx_queue);
- fail1:
+fail:
+ efx_remove_channel(channel);
return rc;
}
+static void
+efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
+{
+ struct efx_nic *efx = channel->efx;
+ const char *type;
+ int number;
+
+ number = channel->channel;
+ if (efx->tx_channel_offset == 0) {
+ type = "";
+ } else if (channel->channel < efx->tx_channel_offset) {
+ type = "-rx";
+ } else {
+ type = "-tx";
+ number -= efx->tx_channel_offset;
+ }
+ snprintf(buf, len, "%s%s-%d", efx->name, type, number);
+}
static void efx_set_channel_names(struct efx_nic *efx)
{
struct efx_channel *channel;
- const char *type = "";
- int number;
- efx_for_each_channel(channel, efx) {
- number = channel->channel;
- if (efx->n_channels > efx->n_rx_channels) {
- if (channel->channel < efx->n_rx_channels) {
- type = "-rx";
- } else {
- type = "-tx";
- number -= efx->n_rx_channels;
- }
- }
- snprintf(efx->channel_name[channel->channel],
- sizeof(efx->channel_name[0]),
- "%s%s-%d", efx->name, type, number);
- }
+ efx_for_each_channel(channel, efx)
+ channel->type->get_name(channel,
+ efx->channel_name[channel->channel],
+ sizeof(efx->channel_name[0]));
}
static int efx_probe_channels(struct efx_nic *efx)
@@ -543,7 +586,12 @@ static int efx_probe_channels(struct efx_nic *efx)
/* Restart special buffer allocation */
efx->next_buffer_table = 0;
- efx_for_each_channel(channel, efx) {
+ /* Probe channels in reverse, so that any 'extra' channels
+ * use the start of the buffer table. This allows the traffic
+ * channels to be resized without moving them or wasting the
+ * entries before them.
+ */
+ efx_for_each_channel_rev(channel, efx) {
rc = efx_probe_channel(channel);
if (rc) {
netif_err(efx, probe, efx->net_dev,
@@ -565,7 +613,7 @@ fail:
* to propagate configuration changes (mtu, checksum offload), or
* to clear hardware error conditions
*/
-static void efx_init_channels(struct efx_nic *efx)
+static void efx_start_datapath(struct efx_nic *efx)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
@@ -584,68 +632,26 @@ static void efx_init_channels(struct efx_nic *efx)
/* Initialise the channels */
efx_for_each_channel(channel, efx) {
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "init chan %d\n", channel->channel);
-
- efx_init_eventq(channel);
-
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_init_tx_queue(tx_queue);
/* The rx buffer allocation strategy is MTU dependent */
efx_rx_strategy(channel);
- efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
+ efx_nic_generate_fill_event(rx_queue);
+ }
WARN_ON(channel->rx_pkt != NULL);
efx_rx_strategy(channel);
}
-}
-
-/* This enables event queue processing and packet transmission.
- *
- * Note that this function is not allowed to fail, since that would
- * introduce too much complexity into the suspend/resume path.
- */
-static void efx_start_channel(struct efx_channel *channel)
-{
- struct efx_rx_queue *rx_queue;
-
- netif_dbg(channel->efx, ifup, channel->efx->net_dev,
- "starting chan %d\n", channel->channel);
-
- /* The interrupt handler for this channel may set work_pending
- * as soon as we enable it. Make sure it's cleared before
- * then. Similarly, make sure it sees the enabled flag set. */
- channel->work_pending = false;
- channel->enabled = true;
- smp_wmb();
- /* Fill the queues before enabling NAPI */
- efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_fast_push_rx_descriptors(rx_queue);
-
- napi_enable(&channel->napi_str);
-}
-
-/* This disables event queue processing and packet transmission.
- * This function does not guarantee that all queue processing
- * (e.g. RX refill) is complete.
- */
-static void efx_stop_channel(struct efx_channel *channel)
-{
- if (!channel->enabled)
- return;
-
- netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
- "stop chan %d\n", channel->channel);
-
- channel->enabled = false;
- napi_disable(&channel->napi_str);
+ if (netif_device_present(efx->net_dev))
+ netif_tx_wake_all_queues(efx->net_dev);
}
-static void efx_fini_channels(struct efx_nic *efx)
+static void efx_stop_datapath(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
@@ -672,14 +678,21 @@ static void efx_fini_channels(struct efx_nic *efx)
}
efx_for_each_channel(channel, efx) {
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "shut down chan %d\n", channel->channel);
+ /* RX packet processing is pipelined, so wait for the
+ * NAPI handler to complete. At least event queue 0
+ * might be kept active by non-data events, so don't
+ * use napi_synchronize() but actually disable NAPI
+ * temporarily.
+ */
+ if (efx_channel_has_rx_queue(channel)) {
+ efx_stop_eventq(channel);
+ efx_start_eventq(channel);
+ }
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fini_rx_queue(rx_queue);
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_fini_tx_queue(tx_queue);
- efx_fini_eventq(channel);
}
}
@@ -711,16 +724,40 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
u32 old_rxq_entries, old_txq_entries;
- unsigned i;
- int rc;
+ unsigned i, next_buffer_table = 0;
+ int rc = 0;
+
+ /* Not all channels should be reallocated. We must avoid
+ * reallocating their buffer table entries.
+ */
+ efx_for_each_channel(channel, efx) {
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+
+ if (channel->type->copy)
+ continue;
+ next_buffer_table = max(next_buffer_table,
+ channel->eventq.index +
+ channel->eventq.entries);
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ next_buffer_table = max(next_buffer_table,
+ rx_queue->rxd.index +
+ rx_queue->rxd.entries);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ next_buffer_table = max(next_buffer_table,
+ tx_queue->txd.index +
+ tx_queue->txd.entries);
+ }
efx_stop_all(efx);
- efx_fini_channels(efx);
+ efx_stop_interrupts(efx, true);
- /* Clone channels */
+ /* Clone channels (where possible) */
memset(other_channel, 0, sizeof(other_channel));
for (i = 0; i < efx->n_channels; i++) {
- channel = efx_alloc_channel(efx, i, efx->channel[i]);
+ channel = efx->channel[i];
+ if (channel->type->copy)
+ channel = channel->type->copy(channel);
if (!channel) {
rc = -ENOMEM;
goto out;
@@ -739,23 +776,31 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
other_channel[i] = channel;
}
- rc = efx_probe_channels(efx);
- if (rc)
- goto rollback;
-
- efx_init_napi(efx);
+ /* Restart buffer table allocation */
+ efx->next_buffer_table = next_buffer_table;
- /* Destroy old channels */
for (i = 0; i < efx->n_channels; i++) {
- efx_fini_napi_channel(other_channel[i]);
- efx_remove_channel(other_channel[i]);
+ channel = efx->channel[i];
+ if (!channel->type->copy)
+ continue;
+ rc = efx_probe_channel(channel);
+ if (rc)
+ goto rollback;
+ efx_init_napi_channel(efx->channel[i]);
}
+
out:
- /* Free unused channel structures */
- for (i = 0; i < efx->n_channels; i++)
- kfree(other_channel[i]);
+ /* Destroy unused channel structures */
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = other_channel[i];
+ if (channel && channel->type->copy) {
+ efx_fini_napi_channel(channel);
+ efx_remove_channel(channel);
+ kfree(channel);
+ }
+ }
- efx_init_channels(efx);
+ efx_start_interrupts(efx, true);
efx_start_all(efx);
return rc;
@@ -776,6 +821,18 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}
+static const struct efx_channel_type efx_default_channel_type = {
+ .pre_probe = efx_channel_dummy_op_int,
+ .get_name = efx_get_channel_name,
+ .copy = efx_copy_channel,
+ .keep_eventq = false,
+};
+
+int efx_channel_dummy_op_int(struct efx_channel *channel)
+{
+ return 0;
+}
+
/**************************************************************************
*
* Port handling
@@ -807,16 +864,14 @@ void efx_link_status_changed(struct efx_nic *efx)
}
/* Status message for kernel log */
- if (link_state->up) {
+ if (link_state->up)
netif_info(efx, link, efx->net_dev,
"link up at %uMbps %s-duplex (MTU %d)%s\n",
link_state->speed, link_state->fd ? "full" : "half",
efx->net_dev->mtu,
(efx->promiscuous ? " [PROMISC]" : ""));
- } else {
+ else
netif_info(efx, link, efx->net_dev, "link down\n");
- }
-
}
void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
@@ -863,11 +918,9 @@ int __efx_reconfigure_port(struct efx_nic *efx)
WARN_ON(!mutex_is_locked(&efx->mac_lock));
- /* Serialise the promiscuous flag with efx_set_multicast_list. */
- if (efx_dev_registered(efx)) {
- netif_addr_lock_bh(efx->net_dev);
- netif_addr_unlock_bh(efx->net_dev);
- }
+ /* Serialise the promiscuous flag with efx_set_rx_mode. */
+ netif_addr_lock_bh(efx->net_dev);
+ netif_addr_unlock_bh(efx->net_dev);
/* Disable PHY transmit in mac level loopbacks */
phy_mode = efx->phy_mode;
@@ -907,16 +960,13 @@ static void efx_mac_work(struct work_struct *data)
struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
mutex_lock(&efx->mac_lock);
- if (efx->port_enabled) {
- efx->type->push_multicast_hash(efx);
- efx->mac_op->reconfigure(efx);
- }
+ if (efx->port_enabled)
+ efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
}
static int efx_probe_port(struct efx_nic *efx)
{
- unsigned char *perm_addr;
int rc;
netif_dbg(efx, probe, efx->net_dev, "create port\n");
@@ -929,28 +979,10 @@ static int efx_probe_port(struct efx_nic *efx)
if (rc)
return rc;
- /* Sanity check MAC address */
- perm_addr = efx->net_dev->perm_addr;
- if (is_valid_ether_addr(perm_addr)) {
- memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
- } else {
- netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
- perm_addr);
- if (!allow_bad_hwaddr) {
- rc = -EINVAL;
- goto err;
- }
- random_ether_addr(efx->net_dev->dev_addr);
- netif_info(efx, probe, efx->net_dev,
- "using locally-generated MAC %pM\n",
- efx->net_dev->dev_addr);
- }
+ /* Initialise MAC address to permanent address */
+ memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN);
return 0;
-
- err:
- efx->type->remove_port(efx);
- return rc;
}
static int efx_init_port(struct efx_nic *efx)
@@ -969,7 +1001,7 @@ static int efx_init_port(struct efx_nic *efx)
/* Reconfigure the MAC before creating dma queues (required for
* Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
- efx->mac_op->reconfigure(efx);
+ efx->type->reconfigure_mac(efx);
/* Ensure the PHY advertises the correct flow control settings */
rc = efx->phy_op->reconfigure(efx);
@@ -996,8 +1028,7 @@ static void efx_start_port(struct efx_nic *efx)
/* efx_mac_work() might have been scheduled after efx_stop_port(),
* and then cancelled by efx_flush_all() */
- efx->type->push_multicast_hash(efx);
- efx->mac_op->reconfigure(efx);
+ efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
}
@@ -1012,10 +1043,8 @@ static void efx_stop_port(struct efx_nic *efx)
mutex_unlock(&efx->mac_lock);
/* Serialise against efx_set_multicast_list() */
- if (efx_dev_registered(efx)) {
- netif_addr_lock_bh(efx->net_dev);
- netif_addr_unlock_bh(efx->net_dev);
- }
+ netif_addr_lock_bh(efx->net_dev);
+ netif_addr_unlock_bh(efx->net_dev);
}
static void efx_fini_port(struct efx_nic *efx)
@@ -1069,9 +1098,11 @@ static int efx_init_io(struct efx_nic *efx)
* masks event though they reject 46 bit masks.
*/
while (dma_mask > 0x7fffffffUL) {
- if (pci_dma_supported(pci_dev, dma_mask) &&
- ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
- break;
+ if (pci_dma_supported(pci_dev, dma_mask)) {
+ rc = pci_set_dma_mask(pci_dev, dma_mask);
+ if (rc == 0)
+ break;
+ }
dma_mask >>= 1;
}
if (rc) {
@@ -1144,33 +1175,46 @@ static void efx_fini_io(struct efx_nic *efx)
pci_disable_device(efx->pci_dev);
}
-/* Get number of channels wanted. Each channel will have its own IRQ,
- * 1 RX queue and/or 2 TX queues. */
-static int efx_wanted_channels(void)
+static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
- cpumask_var_t core_mask;
- int count;
+ cpumask_var_t thread_mask;
+ unsigned int count;
int cpu;
- if (rss_cpus)
- return rss_cpus;
+ if (rss_cpus) {
+ count = rss_cpus;
+ } else {
+ if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
+ netif_warn(efx, probe, efx->net_dev,
+ "RSS disabled due to allocation failure\n");
+ return 1;
+ }
+
+ count = 0;
+ for_each_online_cpu(cpu) {
+ if (!cpumask_test_cpu(cpu, thread_mask)) {
+ ++count;
+ cpumask_or(thread_mask, thread_mask,
+ topology_thread_cpumask(cpu));
+ }
+ }
- if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
- printk(KERN_WARNING
- "sfc: RSS disabled due to allocation failure\n");
- return 1;
+ free_cpumask_var(thread_mask);
}
- count = 0;
- for_each_online_cpu(cpu) {
- if (!cpumask_test_cpu(cpu, core_mask)) {
- ++count;
- cpumask_or(core_mask, core_mask,
- topology_core_cpumask(cpu));
- }
+ /* If RSS is requested for the PF *and* VFs then we can't write RSS
+ * table entries that are inaccessible to VFs
+ */
+ if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+ count > efx_vf_size(efx)) {
+ netif_warn(efx, probe, efx->net_dev,
+ "Reducing number of RSS channels from %u to %u for "
+ "VF support. Increase vf-msix-limit to use more "
+ "channels on the PF.\n",
+ count, efx_vf_size(efx));
+ count = efx_vf_size(efx);
}
- free_cpumask_var(core_mask);
return count;
}
@@ -1178,7 +1222,8 @@ static int
efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
{
#ifdef CONFIG_RFS_ACCEL
- int i, rc;
+ unsigned int i;
+ int rc;
efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
if (!efx->net_dev->rx_cpu_rmap)
@@ -1201,17 +1246,24 @@ efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
*/
static int efx_probe_interrupts(struct efx_nic *efx)
{
- int max_channels =
- min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
- int rc, i;
+ unsigned int max_channels =
+ min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
+ unsigned int extra_channels = 0;
+ unsigned int i, j;
+ int rc;
+
+ for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
+ if (efx->extra_channel_type[i])
+ ++extra_channels;
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
struct msix_entry xentries[EFX_MAX_CHANNELS];
- int n_channels;
+ unsigned int n_channels;
- n_channels = efx_wanted_channels();
+ n_channels = efx_wanted_parallelism(efx);
if (separate_tx_channels)
n_channels *= 2;
+ n_channels += extra_channels;
n_channels = min(n_channels, max_channels);
for (i = 0; i < n_channels; i++)
@@ -1220,7 +1272,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
if (rc > 0) {
netif_err(efx, drv, efx->net_dev,
"WARNING: Insufficient MSI-X vectors"
- " available (%d < %d).\n", rc, n_channels);
+ " available (%d < %u).\n", rc, n_channels);
netif_err(efx, drv, efx->net_dev,
"WARNING: Performance may be reduced.\n");
EFX_BUG_ON_PARANOID(rc >= n_channels);
@@ -1231,22 +1283,23 @@ static int efx_probe_interrupts(struct efx_nic *efx)
if (rc == 0) {
efx->n_channels = n_channels;
+ if (n_channels > extra_channels)
+ n_channels -= extra_channels;
if (separate_tx_channels) {
- efx->n_tx_channels =
- max(efx->n_channels / 2, 1U);
- efx->n_rx_channels =
- max(efx->n_channels -
- efx->n_tx_channels, 1U);
+ efx->n_tx_channels = max(n_channels / 2, 1U);
+ efx->n_rx_channels = max(n_channels -
+ efx->n_tx_channels,
+ 1U);
} else {
- efx->n_tx_channels = efx->n_channels;
- efx->n_rx_channels = efx->n_channels;
+ efx->n_tx_channels = n_channels;
+ efx->n_rx_channels = n_channels;
}
rc = efx_init_rx_cpu_rmap(efx, xentries);
if (rc) {
pci_disable_msix(efx->pci_dev);
return rc;
}
- for (i = 0; i < n_channels; i++)
+ for (i = 0; i < efx->n_channels; i++)
efx_get_channel(efx, i)->irq =
xentries[i].vector;
} else {
@@ -1280,9 +1333,68 @@ static int efx_probe_interrupts(struct efx_nic *efx)
efx->legacy_irq = efx->pci_dev->irq;
}
+ /* Assign extra channels if possible */
+ j = efx->n_channels;
+ for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
+ if (!efx->extra_channel_type[i])
+ continue;
+ if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
+ efx->n_channels <= extra_channels) {
+ efx->extra_channel_type[i]->handle_no_channel(efx);
+ } else {
+ --j;
+ efx_get_channel(efx, j)->type =
+ efx->extra_channel_type[i];
+ }
+ }
+
+ /* RSS might be usable on VFs even if it is disabled on the PF */
+ efx->rss_spread = (efx->n_rx_channels > 1 ?
+ efx->n_rx_channels : efx_vf_size(efx));
+
return 0;
}
+/* Enable interrupts, then probe and start the event queues */
+static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
+{
+ struct efx_channel *channel;
+
+ if (efx->legacy_irq)
+ efx->legacy_irq_enabled = true;
+ efx_nic_enable_interrupts(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (!channel->type->keep_eventq || !may_keep_eventq)
+ efx_init_eventq(channel);
+ efx_start_eventq(channel);
+ }
+
+ efx_mcdi_mode_event(efx);
+}
+
+static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
+{
+ struct efx_channel *channel;
+
+ efx_mcdi_mode_poll(efx);
+
+ efx_nic_disable_interrupts(efx);
+ if (efx->legacy_irq) {
+ synchronize_irq(efx->legacy_irq);
+ efx->legacy_irq_enabled = false;
+ }
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->irq)
+ synchronize_irq(channel->irq);
+
+ efx_stop_eventq(channel);
+ if (!channel->type->keep_eventq || !may_keep_eventq)
+ efx_fini_eventq(channel);
+ }
+}
+
static void efx_remove_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
@@ -1333,11 +1445,13 @@ static int efx_probe_nic(struct efx_nic *efx)
if (rc)
goto fail;
+ efx->type->dimension_resources(efx);
+
if (efx->n_channels > 1)
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
efx->rx_indir_table[i] =
- ethtool_rxfh_indir_default(i, efx->n_rx_channels);
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
efx_set_channels(efx);
netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
@@ -1385,21 +1499,22 @@ static int efx_probe_all(struct efx_nic *efx)
}
efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
- rc = efx_probe_channels(efx);
- if (rc)
- goto fail3;
rc = efx_probe_filters(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to create filter tables\n");
- goto fail4;
+ goto fail3;
}
+ rc = efx_probe_channels(efx);
+ if (rc)
+ goto fail4;
+
return 0;
fail4:
- efx_remove_channels(efx);
+ efx_remove_filters(efx);
fail3:
efx_remove_port(efx);
fail2:
@@ -1408,15 +1523,13 @@ static int efx_probe_all(struct efx_nic *efx)
return rc;
}
-/* Called after previous invocation(s) of efx_stop_all, restarts the
- * port, kernel transmit queue, NAPI processing and hardware interrupts,
- * and ensures that the port is scheduled to be reconfigured.
- * This function is safe to call multiple times when the NIC is in any
- * state. */
+/* Called after previous invocation(s) of efx_stop_all, restarts the port,
+ * kernel transmit queues and NAPI processing, and ensures that the port is
+ * scheduled to be reconfigured. This function is safe to call multiple
+ * times when the NIC is in any state.
+ */
static void efx_start_all(struct efx_nic *efx)
{
- struct efx_channel *channel;
-
EFX_ASSERT_RESET_SERIALISED(efx);
/* Check that it is appropriate to restart the interface. All
@@ -1425,31 +1538,11 @@ static void efx_start_all(struct efx_nic *efx)
return;
if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
return;
- if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
+ if (!netif_running(efx->net_dev))
return;
- /* Mark the port as enabled so port reconfigurations can start, then
- * restart the transmit interface early so the watchdog timer stops */
efx_start_port(efx);
-
- if (efx_dev_registered(efx) && netif_device_present(efx->net_dev))
- netif_tx_wake_all_queues(efx->net_dev);
-
- efx_for_each_channel(channel, efx)
- efx_start_channel(channel);
-
- if (efx->legacy_irq)
- efx->legacy_irq_enabled = true;
- efx_nic_enable_interrupts(efx);
-
- /* Switch to event based MCDI completions after enabling interrupts.
- * If a reset has been scheduled, then we need to stay in polled mode.
- * Rather than serialising efx_mcdi_mode_event() [which sleeps] and
- * reset_pending [modified from an atomic context], we instead guarantee
- * that efx_mcdi_mode_poll() isn't reverted erroneously */
- efx_mcdi_mode_event(efx);
- if (efx->reset_pending)
- efx_mcdi_mode_poll(efx);
+ efx_start_datapath(efx);
/* Start the hardware monitor if there is one. Otherwise (we're link
* event driven), we have to poll the PHY because after an event queue
@@ -1472,8 +1565,9 @@ static void efx_start_all(struct efx_nic *efx)
* since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
- /* Make sure the hardware monitor is stopped */
+ /* Make sure the hardware monitor and event self-test are stopped */
cancel_delayed_work_sync(&efx->monitor_work);
+ efx_selftest_async_cancel(efx);
/* Stop scheduled port reconfigurations */
cancel_work_sync(&efx->mac_work);
}
@@ -1485,8 +1579,6 @@ static void efx_flush_all(struct efx_nic *efx)
* taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
- struct efx_channel *channel;
-
EFX_ASSERT_RESET_SERIALISED(efx);
/* port_enabled can be read safely under the rtnl lock */
@@ -1494,28 +1586,6 @@ static void efx_stop_all(struct efx_nic *efx)
return;
efx->type->stop_stats(efx);
-
- /* Switch to MCDI polling on Siena before disabling interrupts */
- efx_mcdi_mode_poll(efx);
-
- /* Disable interrupts and wait for ISR to complete */
- efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq) {
- synchronize_irq(efx->legacy_irq);
- efx->legacy_irq_enabled = false;
- }
- efx_for_each_channel(channel, efx) {
- if (channel->irq)
- synchronize_irq(channel->irq);
- }
-
- /* Stop all NAPI processing and synchronous rx refills */
- efx_for_each_channel(channel, efx)
- efx_stop_channel(channel);
-
- /* Stop all asynchronous port reconfigurations. Since all
- * event processing has already been stopped, there is no
- * window to loose phy events */
efx_stop_port(efx);
/* Flush efx_mac_work(), refill_workqueue, monitor_work */
@@ -1523,17 +1593,15 @@ static void efx_stop_all(struct efx_nic *efx)
/* Stop the kernel transmit interface late, so the watchdog
* timer isn't ticking over the flush */
- if (efx_dev_registered(efx)) {
- netif_tx_stop_all_queues(efx->net_dev);
- netif_tx_lock_bh(efx->net_dev);
- netif_tx_unlock_bh(efx->net_dev);
- }
+ netif_tx_disable(efx->net_dev);
+
+ efx_stop_datapath(efx);
}
static void efx_remove_all(struct efx_nic *efx)
{
- efx_remove_filters(efx);
efx_remove_channels(efx);
+ efx_remove_filters(efx);
efx_remove_port(efx);
efx_remove_nic(efx);
}
@@ -1544,13 +1612,13 @@ static void efx_remove_all(struct efx_nic *efx)
*
**************************************************************************/
-static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int resolution)
+static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
{
if (usecs == 0)
return 0;
- if (usecs < resolution)
+ if (usecs * 1000 < quantum_ns)
return 1; /* never round down to 0 */
- return usecs / resolution;
+ return usecs * 1000 / quantum_ns;
}
/* Set interrupt moderation parameters */
@@ -1559,14 +1627,20 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
bool rx_may_override_tx)
{
struct efx_channel *channel;
- unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
- unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);
+ unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
+ efx->timer_quantum_ns,
+ 1000);
+ unsigned int tx_ticks;
+ unsigned int rx_ticks;
EFX_ASSERT_RESET_SERIALISED(efx);
- if (tx_ticks > EFX_IRQ_MOD_MAX || rx_ticks > EFX_IRQ_MOD_MAX)
+ if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
return -EINVAL;
+ tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
+ rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);
+
if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
!rx_may_override_tx) {
netif_err(efx, drv, efx->net_dev, "Channels are shared. "
@@ -1589,8 +1663,14 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
unsigned int *rx_usecs, bool *rx_adaptive)
{
+ /* We must round up when converting ticks to microseconds
+ * because we round down when converting the other way.
+ */
+
*rx_adaptive = efx->irq_rx_adaptive;
- *rx_usecs = efx->irq_rx_moderation * EFX_IRQ_MOD_RESOLUTION;
+ *rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
+ efx->timer_quantum_ns,
+ 1000);
/* If channels are shared between RX and TX, so is IRQ
* moderation. Otherwise, IRQ moderation is the same for all
@@ -1599,9 +1679,10 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
if (efx->tx_channel_offset == 0)
*tx_usecs = *rx_usecs;
else
- *tx_usecs =
+ *tx_usecs = DIV_ROUND_UP(
efx->channel[efx->tx_channel_offset]->irq_moderation *
- EFX_IRQ_MOD_RESOLUTION;
+ efx->timer_quantum_ns,
+ 1000);
}
/**************************************************************************
@@ -1664,15 +1745,21 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
*
**************************************************************************/
+static void efx_init_napi_channel(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ channel->napi_dev = efx->net_dev;
+ netif_napi_add(channel->napi_dev, &channel->napi_str,
+ efx_poll, napi_weight);
+}
+
static void efx_init_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
- efx_for_each_channel(channel, efx) {
- channel->napi_dev = efx->net_dev;
- netif_napi_add(channel->napi_dev, &channel->napi_str,
- efx_poll, napi_weight);
- }
+ efx_for_each_channel(channel, efx)
+ efx_init_napi_channel(channel);
}
static void efx_fini_napi_channel(struct efx_channel *channel)
@@ -1740,6 +1827,7 @@ static int efx_net_open(struct net_device *net_dev)
efx_link_status_changed(efx);
efx_start_all(efx);
+ efx_selftest_async_start(efx);
return 0;
}
@@ -1757,22 +1845,21 @@ static int efx_net_stop(struct net_device *net_dev)
if (efx->state != STATE_DISABLED) {
/* Stop the device and flush all the channels */
efx_stop_all(efx);
- efx_fini_channels(efx);
- efx_init_channels(efx);
}
return 0;
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
-static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
+static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_mac_stats *mac_stats = &efx->mac_stats;
spin_lock_bh(&efx->stats_lock);
+
efx->type->update_stats(efx);
- spin_unlock_bh(&efx->stats_lock);
stats->rx_packets = mac_stats->rx_packets;
stats->tx_packets = mac_stats->tx_packets;
@@ -1796,6 +1883,8 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struc
stats->tx_errors = (stats->tx_window_errors +
mac_stats->tx_bad);
+ spin_unlock_bh(&efx->stats_lock);
+
return stats;
}
@@ -1816,7 +1905,6 @@ static void efx_watchdog(struct net_device *net_dev)
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
struct efx_nic *efx = netdev_priv(net_dev);
- int rc = 0;
EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1827,19 +1915,15 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
- efx_fini_channels(efx);
-
mutex_lock(&efx->mac_lock);
/* Reconfigure the MAC before enabling the dma queues so that
* the RX buffers don't overflow */
net_dev->mtu = new_mtu;
- efx->mac_op->reconfigure(efx);
+ efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
- efx_init_channels(efx);
-
efx_start_all(efx);
- return rc;
+ return 0;
}
static int efx_set_mac_address(struct net_device *net_dev, void *data)
@@ -1854,21 +1938,22 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
netif_err(efx, drv, efx->net_dev,
"invalid ethernet MAC address requested: %pM\n",
new_addr);
- return -EINVAL;
+ return -EADDRNOTAVAIL;
}
memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
+ efx_sriov_mac_address_changed(efx);
/* Reconfigure the MAC */
mutex_lock(&efx->mac_lock);
- efx->mac_op->reconfigure(efx);
+ efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
return 0;
}
/* Context: netif_addr_lock held, BHs disabled. */
-static void efx_set_multicast_list(struct net_device *net_dev)
+static void efx_set_rx_mode(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct netdev_hw_addr *ha;
@@ -1922,8 +2007,14 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_do_ioctl = efx_ioctl,
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
- .ndo_set_rx_mode = efx_set_multicast_list,
+ .ndo_set_rx_mode = efx_set_rx_mode,
.ndo_set_features = efx_set_features,
+#ifdef CONFIG_SFC_SRIOV
+ .ndo_set_vf_mac = efx_sriov_set_vf_mac,
+ .ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
+ .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
+ .ndo_get_vf_config = efx_sriov_get_vf_config,
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
#endif
@@ -1975,10 +2066,6 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->netdev_ops = &efx_netdev_ops;
SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
- /* Clear MAC statistics */
- efx->mac_op->update_stats(efx);
- memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
-
rtnl_lock();
rc = dev_alloc_name(net_dev, net_dev->name);
@@ -1997,7 +2084,7 @@ static int efx_register_netdev(struct efx_nic *efx)
}
/* Always start with carrier off; PHY events will detect the link */
- netif_carrier_off(efx->net_dev);
+ netif_carrier_off(net_dev);
rtnl_unlock();
@@ -2038,11 +2125,9 @@ static void efx_unregister_netdev(struct efx_nic *efx)
efx_release_tx_buffers(tx_queue);
}
- if (efx_dev_registered(efx)) {
- strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
- device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
- unregister_netdev(efx->net_dev);
- }
+ strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+ unregister_netdev(efx->net_dev);
}
/**************************************************************************
@@ -2060,7 +2145,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
- efx_fini_channels(efx);
+ efx_stop_interrupts(efx, false);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
efx->phy_op->fini(efx);
efx->type->fini(efx);
@@ -2095,10 +2180,11 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
"could not restore PHY settings\n");
}
- efx->mac_op->reconfigure(efx);
+ efx->type->reconfigure_mac(efx);
- efx_init_channels(efx);
+ efx_start_interrupts(efx, false);
efx_restore_filters(efx);
+ efx_sriov_reset(efx);
mutex_unlock(&efx->mac_lock);
@@ -2292,6 +2378,7 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
#endif
INIT_WORK(&efx->reset_work, efx_reset_work);
INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
+ INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_INIT;
@@ -2300,10 +2387,10 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
efx->net_dev = net_dev;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
- efx->mac_op = type->default_mac_ops;
efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;
INIT_WORK(&efx->mac_work, efx_mac_work);
+ init_waitqueue_head(&efx->flush_wq);
for (i = 0; i < EFX_MAX_CHANNELS; i++) {
efx->channel[i] = efx_alloc_channel(efx, i, NULL);
@@ -2361,8 +2448,8 @@ static void efx_pci_remove_main(struct efx_nic *efx)
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL;
#endif
+ efx_stop_interrupts(efx, false);
efx_nic_fini_interrupt(efx);
- efx_fini_channels(efx);
efx_fini_port(efx);
efx->type->fini(efx);
efx_fini_napi(efx);
@@ -2388,6 +2475,8 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* Allow any queued efx_resets() to complete */
rtnl_unlock();
+ efx_stop_interrupts(efx, false);
+ efx_sriov_fini(efx);
efx_unregister_netdev(efx);
efx_mtd_remove(efx);
@@ -2408,6 +2497,57 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
free_netdev(efx->net_dev);
};
+/* NIC VPD information
+ * Called during probe to display the part number of the
+ * installed NIC. VPD is potentially very large but this should
+ * always appear within the first 512 bytes.
+ */
+#define SFC_VPD_LEN 512
+static void efx_print_product_vpd(struct efx_nic *efx)
+{
+ struct pci_dev *dev = efx->pci_dev;
+ char vpd_data[SFC_VPD_LEN];
+ ssize_t vpd_size;
+ int i, j;
+
+ /* Get the vpd data from the device */
+ vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
+ if (vpd_size <= 0) {
+ netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
+ return;
+ }
+
+ /* Get the Read only section */
+ i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ if (i < 0) {
+ netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
+ return;
+ }
+
+ j = pci_vpd_lrdt_size(&vpd_data[i]);
+ i += PCI_VPD_LRDT_TAG_SIZE;
+ if (i + j > vpd_size)
+ j = vpd_size - i;
+
+ /* Get the Part number */
+ i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
+ if (i < 0) {
+ netif_err(efx, drv, efx->net_dev, "Part number not found\n");
+ return;
+ }
+
+ j = pci_vpd_info_field_size(&vpd_data[i]);
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (i + j > vpd_size) {
+ netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
+ return;
+ }
+
+ netif_info(efx, drv, efx->net_dev,
+ "Part Number : %.*s\n", j, &vpd_data[i]);
+}
+
+
/* Main body of NIC initialisation
* This is called at module load (or hotplug insertion, theoretically).
*/
@@ -2436,16 +2576,14 @@ static int efx_pci_probe_main(struct efx_nic *efx)
goto fail4;
}
- efx_init_channels(efx);
-
rc = efx_nic_init_interrupt(efx);
if (rc)
goto fail5;
+ efx_start_interrupts(efx, false);
return 0;
fail5:
- efx_fini_channels(efx);
efx_fini_port(efx);
fail4:
efx->type->fini(efx);
@@ -2459,7 +2597,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
/* NIC initialisation
*
* This is called at module load (or hotplug insertion,
- * theoretically). It sets up PCI mappings, tests and resets the NIC,
+ * theoretically). It sets up PCI mappings, resets the NIC,
* sets up and registers the network devices with the kernel and hooks
* the interrupt service routine. It does not prepare the device for
* transmission; this is left to the first time one of the network
@@ -2471,7 +2609,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
struct net_device *net_dev;
struct efx_nic *efx;
- int i, rc;
+ int rc;
/* Allocate and initialise a struct net_device and struct efx_nic */
net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
@@ -2499,44 +2637,29 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
netif_info(efx, probe, efx->net_dev,
"Solarflare NIC detected\n");
+ efx_print_product_vpd(efx);
+
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
if (rc)
goto fail2;
- /* No serialisation is required with the reset path because
- * we're in STATE_INIT. */
- for (i = 0; i < 5; i++) {
- rc = efx_pci_probe_main(efx);
-
- /* Serialise against efx_reset(). No more resets will be
- * scheduled since efx_stop_all() has been called, and we
- * have not and never have been registered with either
- * the rtnetlink or driverlink layers. */
- cancel_work_sync(&efx->reset_work);
-
- if (rc == 0) {
- if (efx->reset_pending) {
- /* If there was a scheduled reset during
- * probe, the NIC is probably hosed anyway */
- efx_pci_remove_main(efx);
- rc = -EIO;
- } else {
- break;
- }
- }
+ rc = efx_pci_probe_main(efx);
- /* Retry if a recoverably reset event has been scheduled */
- if (efx->reset_pending &
- ~(1 << RESET_TYPE_INVISIBLE | 1 << RESET_TYPE_ALL) ||
- !efx->reset_pending)
- goto fail3;
+ /* Serialise against efx_reset(). No more resets will be
+ * scheduled since efx_stop_all() has been called, and we have
+ * not and never have been registered.
+ */
+ cancel_work_sync(&efx->reset_work);
- efx->reset_pending = 0;
- }
+ if (rc)
+ goto fail3;
- if (rc) {
- netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n");
+ /* If there was a scheduled reset during probe, the NIC is
+ * probably hosed anyway.
+ */
+ if (efx->reset_pending) {
+ rc = -EIO;
goto fail4;
}
@@ -2546,18 +2669,27 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
rc = efx_register_netdev(efx);
if (rc)
- goto fail5;
+ goto fail4;
+
+ rc = efx_sriov_init(efx);
+ if (rc)
+ netif_err(efx, probe, efx->net_dev,
+ "SR-IOV can't be enabled rc %d\n", rc);
netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
+ /* Try to create MTDs, but allow this to fail */
rtnl_lock();
- efx_mtd_probe(efx); /* allowed to fail */
+ rc = efx_mtd_probe(efx);
rtnl_unlock();
+ if (rc)
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to create MTDs (%d)\n", rc);
+
return 0;
- fail5:
- efx_pci_remove_main(efx);
fail4:
+ efx_pci_remove_main(efx);
fail3:
efx_fini_io(efx);
fail2:
@@ -2578,7 +2710,7 @@ static int efx_pm_freeze(struct device *dev)
netif_device_detach(efx->net_dev);
efx_stop_all(efx);
- efx_fini_channels(efx);
+ efx_stop_interrupts(efx, false);
return 0;
}
@@ -2589,7 +2721,7 @@ static int efx_pm_thaw(struct device *dev)
efx->state = STATE_INIT;
- efx_init_channels(efx);
+ efx_start_interrupts(efx, false);
mutex_lock(&efx->mac_lock);
efx->phy_op->reconfigure(efx);
@@ -2658,7 +2790,7 @@ static int efx_pm_suspend(struct device *dev)
return rc;
}
-static struct dev_pm_ops efx_pm_ops = {
+static const struct dev_pm_ops efx_pm_ops = {
.suspend = efx_pm_suspend,
.resume = efx_pm_resume,
.freeze = efx_pm_freeze,
@@ -2695,6 +2827,10 @@ static int __init efx_init_module(void)
if (rc)
goto err_notifier;
+ rc = efx_init_sriov();
+ if (rc)
+ goto err_sriov;
+
reset_workqueue = create_singlethread_workqueue("sfc_reset");
if (!reset_workqueue) {
rc = -ENOMEM;
@@ -2710,6 +2846,8 @@ static int __init efx_init_module(void)
err_pci:
destroy_workqueue(reset_workqueue);
err_reset:
+ efx_fini_sriov();
+ err_sriov:
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
return rc;
@@ -2721,6 +2859,7 @@ static void __exit efx_exit_module(void)
pci_unregister_driver(&efx_pci_driver);
destroy_workqueue(reset_workqueue);
+ efx_fini_sriov();
unregister_netdevice_notifier(&efx_netdev_notifier);
}
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index a3541ac6ea01..be8f9158a714 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -40,9 +40,9 @@ extern void efx_rx_strategy(struct efx_channel *channel);
extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
extern void efx_rx_slow_fill(unsigned long context);
extern void __efx_rx_packet(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf, bool checksummed);
+ struct efx_rx_buffer *rx_buf);
extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
- unsigned int len, bool checksummed, bool discard);
+ unsigned int len, u16 flags);
extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_MAX_DMAQ_SIZE 4096UL
@@ -95,6 +95,7 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
#endif
/* Channels */
+extern int efx_channel_dummy_op_int(struct efx_channel *channel);
extern void efx_process_channel_now(struct efx_channel *channel);
extern int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
@@ -145,6 +146,12 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
napi_schedule(&channel->napi_str);
}
+static inline void efx_schedule_channel_irq(struct efx_channel *channel)
+{
+ channel->event_test_cpu = raw_smp_processor_id();
+ efx_schedule_channel(channel);
+}
+
extern void efx_link_status_changed(struct efx_nic *efx);
extern void efx_link_set_advertising(struct efx_nic *efx, u32);
extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 29b2ebfef19f..f22f45f515a8 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -52,11 +52,6 @@ static u64 efx_get_uint_stat(void *field)
return *(unsigned int *)field;
}
-static u64 efx_get_ulong_stat(void *field)
-{
- return *(unsigned long *)field;
-}
-
static u64 efx_get_u64_stat(void *field)
{
return *(u64 *) field;
@@ -67,12 +62,8 @@ static u64 efx_get_atomic_stat(void *field)
return atomic_read((atomic_t *) field);
}
-#define EFX_ETHTOOL_ULONG_MAC_STAT(field) \
- EFX_ETHTOOL_STAT(field, mac_stats, field, \
- unsigned long, efx_get_ulong_stat)
-
#define EFX_ETHTOOL_U64_MAC_STAT(field) \
- EFX_ETHTOOL_STAT(field, mac_stats, field, \
+ EFX_ETHTOOL_STAT(field, mac_stats, field, \
u64, efx_get_u64_stat)
#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
@@ -91,36 +82,36 @@ static u64 efx_get_atomic_stat(void *field)
EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
unsigned int, efx_get_uint_stat)
-static struct efx_ethtool_stat efx_ethtool_stats[] = {
+static const struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_control),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_64),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
- EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_packets),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_bad),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_pause),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_control),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_unicast),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_multicast),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_broadcast),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_lt64),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_64),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_65_to_127),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_128_to_255),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_256_to_511),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_512_to_1023),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_1024_to_15xx),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_15xx_to_jumbo),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_gtjumbo),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_collision),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_single_collision),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_multiple_collision),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_collision),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_deferred),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_late_collision),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_deferred),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_non_tcpudp),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_mac_src_error),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_ip_src_error),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
@@ -128,34 +119,34 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_good),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_control),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_64),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error),
- EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_packets),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_good),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_bad),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_pause),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_control),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_unicast),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_multicast),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_broadcast),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_lt64),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_64),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_65_to_127),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_128_to_255),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_256_to_511),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_512_to_1023),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_1024_to_15xx),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_15xx_to_jumbo),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_gtjumbo),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_bad_lt64),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_bad_64_to_15xx),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_bad_15xx_to_jumbo),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_bad_gtjumbo),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_overflow),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_missed),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_false_carrier),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_symbol_error),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_align_error),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_length_error),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_internal_error),
EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
@@ -404,10 +395,6 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
&tests->eventq_int[channel->channel],
EFX_CHANNEL_NAME(channel),
"eventq.int", NULL);
- efx_fill_test(n++, strings, data,
- &tests->eventq_poll[channel->channel],
- EFX_CHANNEL_NAME(channel),
- "eventq.poll", NULL);
}
efx_fill_test(n++, strings, data, &tests->registers,
@@ -486,16 +473,17 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_mac_stats *mac_stats = &efx->mac_stats;
- struct efx_ethtool_stat *stat;
+ const struct efx_ethtool_stat *stat;
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
- struct rtnl_link_stats64 temp;
int i;
EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
+ spin_lock_bh(&efx->stats_lock);
+
/* Update MAC and NIC statistics */
- dev_get_stats(net_dev, &temp);
+ efx->type->update_stats(efx);
/* Fill detailed statistics buffer */
for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
@@ -525,6 +513,8 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
break;
}
}
+
+ spin_unlock_bh(&efx->stats_lock);
}
static void efx_ethtool_self_test(struct net_device *net_dev,
@@ -747,7 +737,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
/* Recover by resetting the EM block */
falcon_stop_nic_stats(efx);
falcon_drain_tx_fifo(efx);
- efx->mac_op->reconfigure(efx);
+ falcon_reconfigure_xmac(efx);
falcon_start_nic_stats(efx);
} else {
/* Schedule a reset to recover */
@@ -772,7 +762,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
/* Reconfigure the MAC. The PHY *may* generate a link state change event
* if the user just changed the advertised capabilities, but there's no
* harm doing this twice */
- efx->mac_op->reconfigure(efx);
+ efx->type->reconfigure_mac(efx);
out:
mutex_unlock(&efx->mac_lock);
@@ -818,11 +808,16 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
return efx_reset(efx, rc);
}
+/* MAC address mask including only MC flag */
+static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
+
static int efx_ethtool_get_class_rule(struct efx_nic *efx,
struct ethtool_rx_flow_spec *rule)
{
struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+ struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+ struct ethhdr *mac_mask = &rule->m_u.ether_spec;
struct efx_filter_spec spec;
u16 vid;
u8 proto;
@@ -838,11 +833,18 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
else
rule->ring_cookie = spec.dmaq_id;
- rc = efx_filter_get_eth_local(&spec, &vid,
- rule->h_u.ether_spec.h_dest);
+ if (spec.type == EFX_FILTER_MC_DEF || spec.type == EFX_FILTER_UC_DEF) {
+ rule->flow_type = ETHER_FLOW;
+ memcpy(mac_mask->h_dest, mac_addr_mc_mask, ETH_ALEN);
+ if (spec.type == EFX_FILTER_MC_DEF)
+ memcpy(mac_entry->h_dest, mac_addr_mc_mask, ETH_ALEN);
+ return 0;
+ }
+
+ rc = efx_filter_get_eth_local(&spec, &vid, mac_entry->h_dest);
if (rc == 0) {
rule->flow_type = ETHER_FLOW;
- memset(rule->m_u.ether_spec.h_dest, ~0, ETH_ALEN);
+ memset(mac_mask->h_dest, ~0, ETH_ALEN);
if (vid != EFX_FILTER_VID_UNSPEC) {
rule->flow_type |= FLOW_EXT;
rule->h_ext.vlan_tci = htons(vid);
@@ -1011,27 +1013,40 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
}
case ETHER_FLOW | FLOW_EXT:
- /* Must match all or none of VID */
- if (rule->m_ext.vlan_tci != htons(0xfff) &&
- rule->m_ext.vlan_tci != 0)
- return -EINVAL;
- case ETHER_FLOW:
- /* Must match all of destination */
- if (!is_broadcast_ether_addr(mac_mask->h_dest))
- return -EINVAL;
- /* and nothing else */
+ case ETHER_FLOW: {
+ u16 vlan_tag_mask = (rule->flow_type & FLOW_EXT ?
+ ntohs(rule->m_ext.vlan_tci) : 0);
+
+ /* Must not match on source address or Ethertype */
if (!is_zero_ether_addr(mac_mask->h_source) ||
mac_mask->h_proto)
return -EINVAL;
- rc = efx_filter_set_eth_local(
- &spec,
- (rule->flow_type & FLOW_EXT && rule->m_ext.vlan_tci) ?
- ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
- mac_entry->h_dest);
+ /* Is it a default UC or MC filter? */
+ if (!compare_ether_addr(mac_mask->h_dest, mac_addr_mc_mask) &&
+ vlan_tag_mask == 0) {
+ if (is_multicast_ether_addr(mac_entry->h_dest))
+ rc = efx_filter_set_mc_def(&spec);
+ else
+ rc = efx_filter_set_uc_def(&spec);
+ }
+ /* Otherwise, it must match all of destination and all
+ * or none of VID.
+ */
+ else if (is_broadcast_ether_addr(mac_mask->h_dest) &&
+ (vlan_tag_mask == 0xfff || vlan_tag_mask == 0)) {
+ rc = efx_filter_set_eth_local(
+ &spec,
+ vlan_tag_mask ?
+ ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
+ mac_entry->h_dest);
+ } else {
+ rc = -EINVAL;
+ }
if (rc)
return rc;
break;
+ }
default:
return -EINVAL;
@@ -1070,7 +1085,8 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- return (efx_nic_rev(efx) < EFX_REV_FALCON_B0 ?
+ return ((efx_nic_rev(efx) < EFX_REV_FALCON_B0 ||
+ efx->n_rx_channels == 1) ?
0 : ARRAY_SIZE(efx->rx_indir_table));
}
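The ethtool changes above widen every MAC counter to u64 (so one getter covers the whole, now-const descriptor table) and make efx_ethtool_get_stats() refresh the snapshot via efx->type->update_stats() under efx->stats_lock instead of going through dev_get_stats(). A minimal host-side model of that descriptor-table pattern follows; the struct layout and names are invented for illustration.

/* Sketch: const {name, offset, getter} table walked once per ethtool -S. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mac_stats {
	uint64_t tx_bytes;
	uint64_t tx_packets;	/* formerly unsigned long; now u64 like the rest */
	uint64_t rx_bytes;
};

struct stat_desc {
	const char *name;
	size_t offset;
	uint64_t (*read)(const void *field);
};

static uint64_t read_u64(const void *field)
{
	return *(const uint64_t *)field;
}

/* With every MAC counter widened to u64, one getter covers the whole table. */
static const struct stat_desc stats[] = {
	{ "tx_bytes",   offsetof(struct mac_stats, tx_bytes),   read_u64 },
	{ "tx_packets", offsetof(struct mac_stats, tx_packets), read_u64 },
	{ "rx_bytes",   offsetof(struct mac_stats, rx_bytes),   read_u64 },
};

int main(void)
{
	struct mac_stats mac = { 123456, 789, 654321 };
	size_t i;

	/* In the driver this walk happens with the stats lock held, right
	 * after the NIC-type update_stats hook refreshes the snapshot. */
	for (i = 0; i < sizeof(stats) / sizeof(stats[0]); i++)
		printf("%-12s %llu\n", stats[i].name,
		       (unsigned long long)stats[i].read((char *)&mac + stats[i].offset));
	return 0;
}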
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 8ae1ebd35397..3a1ca2bd1548 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -19,7 +19,6 @@
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
-#include "mac.h"
#include "spi.h"
#include "nic.h"
#include "regs.h"
@@ -89,7 +88,7 @@ static int falcon_getscl(void *data)
return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
}
-static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
+static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
.setsda = falcon_setsda,
.setscl = falcon_setscl,
.getsda = falcon_getsda,
@@ -104,8 +103,6 @@ static void falcon_push_irq_moderation(struct efx_channel *channel)
efx_dword_t timer_cmd;
struct efx_nic *efx = channel->efx;
- BUILD_BUG_ON(EFX_IRQ_MOD_MAX > (1 << FRF_AB_TC_TIMER_VAL_WIDTH));
-
/* Set timer register */
if (channel->irq_moderation) {
EFX_POPULATE_DWORD_2(timer_cmd,
@@ -177,27 +174,24 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+ /* Check to see if we have a serious error condition */
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+
/* Determine interrupting queues, clear interrupt status
* register and acknowledge the device interrupt.
*/
BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
-
- /* Check to see if we have a serious error condition */
- if (queues & (1U << efx->fatal_irq_level)) {
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
- }
-
EFX_ZERO_OWORD(*int_ker);
wmb(); /* Ensure the vector is cleared before interrupt ack */
falcon_irq_ack_a1(efx);
if (queues & 1)
- efx_schedule_channel(efx_get_channel(efx, 0));
+ efx_schedule_channel_irq(efx_get_channel(efx, 0));
if (queues & 2)
- efx_schedule_channel(efx_get_channel(efx, 1));
+ efx_schedule_channel_irq(efx_get_channel(efx, 1));
return IRQ_HANDLED;
}
/**************************************************************************
@@ -613,7 +607,7 @@ static void falcon_stats_complete(struct efx_nic *efx)
nic_data->stats_pending = false;
if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
rmb(); /* read the done flag before the stats */
- efx->mac_op->update_stats(efx);
+ falcon_update_stats_xmac(efx);
} else {
netif_err(efx, hw, efx->net_dev,
"timed out waiting for statistics\n");
@@ -670,7 +664,7 @@ static int falcon_reconfigure_port(struct efx_nic *efx)
falcon_reset_macs(efx);
efx->phy_op->reconfigure(efx);
- rc = efx->mac_op->reconfigure(efx);
+ rc = falcon_reconfigure_xmac(efx);
BUG_ON(rc);
falcon_start_nic_stats(efx);
@@ -1218,7 +1212,7 @@ static void falcon_monitor(struct efx_nic *efx)
falcon_deconfigure_mac_wrapper(efx);
falcon_reset_macs(efx);
- rc = efx->mac_op->reconfigure(efx);
+ rc = falcon_reconfigure_xmac(efx);
BUG_ON(rc);
falcon_start_nic_stats(efx);
@@ -1339,6 +1333,12 @@ out:
return rc;
}
+static void falcon_dimension_resources(struct efx_nic *efx)
+{
+ efx->rx_dc_base = 0x20000;
+ efx->tx_dc_base = 0x26000;
+}
+
/* Probe all SPI devices on the NIC */
static void falcon_probe_spi_devices(struct efx_nic *efx)
{
@@ -1472,6 +1472,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
goto fail5;
}
+ efx->timer_quantum_ns = 4968; /* 621 cycles */
+
/* Initialise I2C adapter */
board = falcon_board(efx);
board->i2c_adap.owner = THIS_MODULE;
@@ -1676,7 +1678,7 @@ static void falcon_update_nic_stats(struct efx_nic *efx)
*nic_data->stats_dma_done == FALCON_STATS_DONE) {
nic_data->stats_pending = false;
rmb(); /* read the done flag before the stats */
- efx->mac_op->update_stats(efx);
+ falcon_update_stats_xmac(efx);
}
}
@@ -1753,6 +1755,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,
+ .dimension_resources = falcon_dimension_resources,
.fini = efx_port_dummy_op_void,
.monitor = falcon_monitor,
.map_reset_reason = falcon_map_reset_reason,
@@ -1767,13 +1770,13 @@ const struct efx_nic_type falcon_a1_nic_type = {
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
- .push_multicast_hash = falcon_push_multicast_hash,
.reconfigure_port = falcon_reconfigure_port,
+ .reconfigure_mac = falcon_reconfigure_xmac,
+ .check_mac_fault = falcon_xmac_check_fault,
.get_wol = falcon_get_wol,
.set_wol = falcon_set_wol,
.resume_wol = efx_port_dummy_op_void,
.test_nvram = falcon_test_nvram,
- .default_mac_ops = &falcon_xmac_operations,
.revision = EFX_REV_FALCON_A1,
.mem_map_size = 0x20000,
@@ -1786,8 +1789,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.rx_buffer_padding = 0x24,
.max_interrupt_mode = EFX_INT_MODE_MSI,
.phys_addr_channels = 4,
- .tx_dc_base = 0x130000,
- .rx_dc_base = 0x100000,
+ .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM,
};
@@ -1795,6 +1797,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,
+ .dimension_resources = falcon_dimension_resources,
.fini = efx_port_dummy_op_void,
.monitor = falcon_monitor,
.map_reset_reason = falcon_map_reset_reason,
@@ -1809,14 +1812,14 @@ const struct efx_nic_type falcon_b0_nic_type = {
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
- .push_multicast_hash = falcon_push_multicast_hash,
.reconfigure_port = falcon_reconfigure_port,
+ .reconfigure_mac = falcon_reconfigure_xmac,
+ .check_mac_fault = falcon_xmac_check_fault,
.get_wol = falcon_get_wol,
.set_wol = falcon_set_wol,
.resume_wol = efx_port_dummy_op_void,
.test_registers = falcon_b0_test_registers,
.test_nvram = falcon_test_nvram,
- .default_mac_ops = &falcon_xmac_operations,
.revision = EFX_REV_FALCON_B0,
/* Map everything up to and including the RSS indirection
@@ -1837,8 +1840,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
* interrupt handler only supports 32
* channels */
- .tx_dc_base = 0x130000,
- .rx_dc_base = 0x100000,
+ .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
};
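The falcon.c hunks above fold the separate efx_mac_operations table into reconfigure_mac/check_mac_fault hooks on the NIC type, and turn the fixed tx_dc_base/rx_dc_base fields into a dimension_resources() callback. A cut-down model of that ops-table shape follows; the types are simplified stand-ins, though the 0x20000/0x26000 values mirror falcon_dimension_resources() above.

/* Sketch: per-NIC-type ops table with a dimension_resources() callback. */
#include <stdbool.h>
#include <stdio.h>

struct nic;

struct nic_type {
	const char *name;
	void (*dimension_resources)(struct nic *nic);
	int (*reconfigure_mac)(struct nic *nic);
	bool (*check_mac_fault)(struct nic *nic);
};

struct nic {
	unsigned int rx_dc_base;
	unsigned int tx_dc_base;
	const struct nic_type *type;
};

static void falcon_like_dimension_resources(struct nic *nic)
{
	nic->rx_dc_base = 0x20000;	/* values as in the hunk above */
	nic->tx_dc_base = 0x26000;
}

static int xmac_like_reconfigure(struct nic *nic) { (void)nic; return 0; }
static bool xmac_like_check_fault(struct nic *nic) { (void)nic; return false; }

static const struct nic_type falcon_like_type = {
	.name = "falcon-like",
	.dimension_resources = falcon_like_dimension_resources,
	.reconfigure_mac = xmac_like_reconfigure,
	.check_mac_fault = xmac_like_check_fault,
};

int main(void)
{
	struct nic nic = { .type = &falcon_like_type };

	nic.type->dimension_resources(&nic);
	printf("%s: rx_dc_base=%#x tx_dc_base=%#x mac_fault=%d\n",
	       nic.type->name, nic.rx_dc_base, nic.tx_dc_base,
	       nic.type->check_mac_fault(&nic));
	return nic.type->reconfigure_mac(&nic);
}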
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index 6cc16b8cc6f4..8687a6c3db0d 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -87,7 +87,7 @@ static const u8 falcon_lm87_common_regs[] = {
0
};
-static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
+static int efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info,
const u8 *reg_values)
{
struct falcon_board *board = falcon_board(efx);
@@ -179,7 +179,7 @@ static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
#else /* !CONFIG_SENSORS_LM87 */
static inline int
-efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
+efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info,
const u8 *reg_values)
{
return 0;
@@ -442,7 +442,7 @@ static int sfe4001_check_hw(struct efx_nic *efx)
return (status < 0) ? -EIO : -ERANGE;
}
-static struct i2c_board_info sfe4001_hwmon_info = {
+static const struct i2c_board_info sfe4001_hwmon_info = {
I2C_BOARD_INFO("max6647", 0x4e),
};
@@ -522,7 +522,7 @@ static const u8 sfe4002_lm87_regs[] = {
0
};
-static struct i2c_board_info sfe4002_hwmon_info = {
+static const struct i2c_board_info sfe4002_hwmon_info = {
I2C_BOARD_INFO("lm87", 0x2e),
.platform_data = &sfe4002_lm87_channel,
};
@@ -591,7 +591,7 @@ static const u8 sfn4112f_lm87_regs[] = {
0
};
-static struct i2c_board_info sfn4112f_hwmon_info = {
+static const struct i2c_board_info sfn4112f_hwmon_info = {
I2C_BOARD_INFO("lm87", 0x2e),
.platform_data = &sfn4112f_lm87_channel,
};
@@ -653,7 +653,7 @@ static const u8 sfe4003_lm87_regs[] = {
0
};
-static struct i2c_board_info sfe4003_hwmon_info = {
+static const struct i2c_board_info sfe4003_hwmon_info = {
I2C_BOARD_INFO("lm87", 0x2e),
.platform_data = &sfe4003_lm87_channel,
};
@@ -709,8 +709,6 @@ static int sfe4003_init(struct efx_nic *efx)
static const struct falcon_board_type board_types[] = {
{
.id = FALCON_BOARD_SFE4001,
- .ref_model = "SFE4001",
- .gen_type = "10GBASE-T adapter",
.init = sfe4001_init,
.init_phy = efx_port_dummy_op_void,
.fini = sfe4001_fini,
@@ -719,8 +717,6 @@ static const struct falcon_board_type board_types[] = {
},
{
.id = FALCON_BOARD_SFE4002,
- .ref_model = "SFE4002",
- .gen_type = "XFP adapter",
.init = sfe4002_init,
.init_phy = sfe4002_init_phy,
.fini = efx_fini_lm87,
@@ -729,8 +725,6 @@ static const struct falcon_board_type board_types[] = {
},
{
.id = FALCON_BOARD_SFE4003,
- .ref_model = "SFE4003",
- .gen_type = "10GBASE-CX4 adapter",
.init = sfe4003_init,
.init_phy = sfe4003_init_phy,
.fini = efx_fini_lm87,
@@ -739,8 +733,6 @@ static const struct falcon_board_type board_types[] = {
},
{
.id = FALCON_BOARD_SFN4112F,
- .ref_model = "SFN4112F",
- .gen_type = "SFP+ adapter",
.init = sfn4112f_init,
.init_phy = sfn4112f_init_phy,
.fini = efx_fini_lm87,
@@ -763,11 +755,6 @@ int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
board->type = &board_types[i];
if (board->type) {
- netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n",
- (efx->pci_dev->subsystem_vendor ==
- PCI_VENDOR_ID_SOLARFLARE)
- ? board->type->ref_model : board->type->gen_type,
- 'A' + board->major, board->minor);
return 0;
} else {
netif_err(efx, probe, efx->net_dev, "unknown board type %d\n",
diff --git a/drivers/net/ethernet/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c
index 9516452c079c..6106ef15dee3 100644
--- a/drivers/net/ethernet/sfc/falcon_xmac.c
+++ b/drivers/net/ethernet/sfc/falcon_xmac.c
@@ -14,7 +14,6 @@
#include "nic.h"
#include "regs.h"
#include "io.h"
-#include "mac.h"
#include "mdio_10g.h"
#include "workarounds.h"
@@ -139,7 +138,7 @@ static bool falcon_xmac_link_ok(struct efx_nic *efx)
return (efx->loopback_mode == LOOPBACK_XGMII ||
falcon_xgxs_link_ok(efx)) &&
(!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
- LOOPBACK_INTERNAL(efx) ||
+ LOOPBACK_INTERNAL(efx) ||
efx_mdio_phyxgxs_lane_sync(efx));
}
@@ -270,12 +269,12 @@ static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
return mac_up;
}
-static bool falcon_xmac_check_fault(struct efx_nic *efx)
+bool falcon_xmac_check_fault(struct efx_nic *efx)
{
return !falcon_xmac_link_ok_retry(efx, 5);
}
-static int falcon_reconfigure_xmac(struct efx_nic *efx)
+int falcon_reconfigure_xmac(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
@@ -290,7 +289,7 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx)
return 0;
}
-static void falcon_update_stats_xmac(struct efx_nic *efx)
+void falcon_update_stats_xmac(struct efx_nic *efx)
{
struct efx_mac_stats *mac_stats = &efx->mac_stats;
@@ -361,9 +360,3 @@ void falcon_poll_xmac(struct efx_nic *efx)
nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
falcon_ack_status_intr(efx);
}
-
-const struct efx_mac_operations falcon_xmac_operations = {
- .reconfigure = falcon_reconfigure_xmac,
- .update_stats = falcon_update_stats_xmac,
- .check_fault = falcon_xmac_check_fault,
-};
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 1fbbbee7b1ae..fea7f7300675 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -35,9 +35,17 @@
enum efx_filter_table_id {
EFX_FILTER_TABLE_RX_IP = 0,
EFX_FILTER_TABLE_RX_MAC,
+ EFX_FILTER_TABLE_RX_DEF,
+ EFX_FILTER_TABLE_TX_MAC,
EFX_FILTER_TABLE_COUNT,
};
+enum efx_filter_index {
+ EFX_FILTER_INDEX_UC_DEF,
+ EFX_FILTER_INDEX_MC_DEF,
+ EFX_FILTER_SIZE_RX_DEF,
+};
+
struct efx_filter_table {
enum efx_filter_table_id id;
u32 offset; /* address of table relative to BAR */
@@ -90,8 +98,9 @@ efx_filter_spec_table_id(const struct efx_filter_spec *spec)
BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_TX_MAC != EFX_FILTER_TABLE_RX_MAC + 2);
EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
- return spec->type >> 2;
+ return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
}
static struct efx_filter_table *
@@ -109,7 +118,7 @@ static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
memset(table->search_depth, 0, sizeof(table->search_depth));
}
-static void efx_filter_push_rx_limits(struct efx_nic *efx)
+static void efx_filter_push_rx_config(struct efx_nic *efx)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table;
@@ -143,9 +152,58 @@ static void efx_filter_push_rx_limits(struct efx_nic *efx)
FILTER_CTL_SRCH_FUDGE_WILD);
}
+ table = &state->table[EFX_FILTER_TABLE_RX_DEF];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
+ table->spec[EFX_FILTER_INDEX_UC_DEF].dmaq_id);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
+ !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
+ EFX_FILTER_FLAG_RX_RSS));
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE,
+ !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
+ EFX_FILTER_FLAG_RX_OVERRIDE_IP));
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
+ table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
+ !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
+ EFX_FILTER_FLAG_RX_RSS));
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE,
+ !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
+ EFX_FILTER_FLAG_RX_OVERRIDE_IP));
+ }
+
efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}
+static void efx_filter_push_tx_limits(struct efx_nic *efx)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ struct efx_filter_table *table;
+ efx_oword_t tx_cfg;
+
+ efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
+
+ table = &state->table[EFX_FILTER_TABLE_TX_MAC];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
+ table->search_depth[EFX_FILTER_MAC_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
+ table->search_depth[EFX_FILTER_MAC_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
+}
+
static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
__be32 host1, __be16 port1,
__be32 host2, __be16 port2)
@@ -300,7 +358,8 @@ int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
int efx_filter_set_eth_local(struct efx_filter_spec *spec,
u16 vid, const u8 *addr)
{
- EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
+ EFX_BUG_ON_PARANOID(!(spec->flags &
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
/* This cannot currently be combined with other filtering */
if (spec->type != EFX_FILTER_UNSPEC)
@@ -319,6 +378,52 @@ int efx_filter_set_eth_local(struct efx_filter_spec *spec,
return 0;
}
+/**
+ * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
+ * @spec: Specification to initialise
+ */
+int efx_filter_set_uc_def(struct efx_filter_spec *spec)
+{
+ EFX_BUG_ON_PARANOID(!(spec->flags &
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
+
+ if (spec->type != EFX_FILTER_UNSPEC)
+ return -EINVAL;
+
+ spec->type = EFX_FILTER_UC_DEF;
+ memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
+ return 0;
+}
+
+/**
+ * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
+ * @spec: Specification to initialise
+ */
+int efx_filter_set_mc_def(struct efx_filter_spec *spec)
+{
+ EFX_BUG_ON_PARANOID(!(spec->flags &
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
+
+ if (spec->type != EFX_FILTER_UNSPEC)
+ return -EINVAL;
+
+ spec->type = EFX_FILTER_MC_DEF;
+ memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
+ return 0;
+}
+
+static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
+ struct efx_filter_spec *spec = &table->spec[filter_idx];
+
+ efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL,
+ EFX_FILTER_FLAG_RX_RSS, 0);
+ spec->type = EFX_FILTER_UC_DEF + filter_idx;
+ table->used_bitmap[0] |= 1 << filter_idx;
+}
+
int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
u16 *vid, u8 *addr)
{
@@ -366,6 +471,13 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
break;
}
+ case EFX_FILTER_TABLE_RX_DEF:
+ /* One filter spec per type */
+ BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
+ BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
+ EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
+ return spec->type - EFX_FILTER_UC_DEF;
+
case EFX_FILTER_TABLE_RX_MAC: {
bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
EFX_POPULATE_OWORD_8(
@@ -385,6 +497,18 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
break;
}
+ case EFX_FILTER_TABLE_TX_MAC: {
+ bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
+ FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
+ FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
+ FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
+ data3 = is_wild | spec->dmaq_id << 1;
+ break;
+ }
+
default:
BUG();
}
@@ -399,6 +523,10 @@ static bool efx_filter_equal(const struct efx_filter_spec *left,
memcmp(left->data, right->data, sizeof(left->data)))
return false;
+ if (left->flags & EFX_FILTER_FLAG_TX &&
+ left->dmaq_id != right->dmaq_id)
+ return false;
+
return true;
}
@@ -448,23 +576,40 @@ static int efx_filter_search(struct efx_filter_table *table,
* MAC filters without overriding behaviour.
*/
+#define EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP 0
+#define EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP 1
+#define EFX_FILTER_MATCH_PRI_NORMAL_BASE 2
+
#define EFX_FILTER_INDEX_WIDTH 13
#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id,
unsigned int index, u8 flags)
{
- return (table_id == EFX_FILTER_TABLE_RX_MAC &&
- flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) ?
- index :
- (table_id + 1) << EFX_FILTER_INDEX_WIDTH | index;
+ unsigned int match_pri = EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id;
+
+ if (flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) {
+ if (table_id == EFX_FILTER_TABLE_RX_MAC)
+ match_pri = EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP;
+ else if (table_id == EFX_FILTER_TABLE_RX_DEF)
+ match_pri = EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP;
+ }
+
+ return match_pri << EFX_FILTER_INDEX_WIDTH | index;
}
static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
{
- return (id <= EFX_FILTER_INDEX_MASK) ?
- EFX_FILTER_TABLE_RX_MAC :
- (id >> EFX_FILTER_INDEX_WIDTH) - 1;
+ unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH;
+
+ switch (match_pri) {
+ case EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP:
+ return EFX_FILTER_TABLE_RX_MAC;
+ case EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP:
+ return EFX_FILTER_TABLE_RX_DEF;
+ default:
+ return match_pri - EFX_FILTER_MATCH_PRI_NORMAL_BASE;
+ }
}
static inline unsigned int efx_filter_id_index(u32 id)
@@ -474,23 +619,30 @@ static inline unsigned int efx_filter_id_index(u32 id)
static inline u8 efx_filter_id_flags(u32 id)
{
- return (id <= EFX_FILTER_INDEX_MASK) ?
- EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP :
- EFX_FILTER_FLAG_RX;
+ unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH;
+
+ if (match_pri < EFX_FILTER_MATCH_PRI_NORMAL_BASE)
+ return EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP;
+ else if (match_pri <=
+ EFX_FILTER_MATCH_PRI_NORMAL_BASE + EFX_FILTER_TABLE_RX_DEF)
+ return EFX_FILTER_FLAG_RX;
+ else
+ return EFX_FILTER_FLAG_TX;
}
u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
struct efx_filter_state *state = efx->filter_state;
+ unsigned int table_id = EFX_FILTER_TABLE_RX_DEF;
- if (state->table[EFX_FILTER_TABLE_RX_MAC].size != 0)
- return ((EFX_FILTER_TABLE_RX_MAC + 1) << EFX_FILTER_INDEX_WIDTH)
- + state->table[EFX_FILTER_TABLE_RX_MAC].size;
- else if (state->table[EFX_FILTER_TABLE_RX_IP].size != 0)
- return ((EFX_FILTER_TABLE_RX_IP + 1) << EFX_FILTER_INDEX_WIDTH)
- + state->table[EFX_FILTER_TABLE_RX_IP].size;
- else
- return 0;
+ do {
+ if (state->table[table_id].size != 0)
+ return ((EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id)
+ << EFX_FILTER_INDEX_WIDTH) +
+ state->table[table_id].size;
+ } while (table_id--);
+
+ return 0;
}
/**
@@ -548,12 +700,20 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
}
*saved_spec = *spec;
- if (table->search_depth[spec->type] < depth) {
- table->search_depth[spec->type] = depth;
- efx_filter_push_rx_limits(efx);
- }
+ if (table->id == EFX_FILTER_TABLE_RX_DEF) {
+ efx_filter_push_rx_config(efx);
+ } else {
+ if (table->search_depth[spec->type] < depth) {
+ table->search_depth[spec->type] = depth;
+ if (spec->flags & EFX_FILTER_FLAG_TX)
+ efx_filter_push_tx_limits(efx);
+ else
+ efx_filter_push_rx_config(efx);
+ }
- efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
netif_vdbg(efx, hw, efx->net_dev,
"%s: filter type %d index %d rxq %u set",
@@ -571,7 +731,11 @@ static void efx_filter_table_clear_entry(struct efx_nic *efx,
{
static efx_oword_t filter;
- if (test_bit(filter_idx, table->used_bitmap)) {
+ if (table->id == EFX_FILTER_TABLE_RX_DEF) {
+ /* RX default filters must always exist */
+ efx_filter_reset_rx_def(efx, filter_idx);
+ efx_filter_push_rx_config(efx);
+ } else if (test_bit(filter_idx, table->used_bitmap)) {
__clear_bit(filter_idx, table->used_bitmap);
--table->used;
memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
@@ -617,7 +781,8 @@ int efx_filter_remove_id_safe(struct efx_nic *efx,
spin_lock_bh(&state->lock);
if (test_bit(filter_idx, table->used_bitmap) &&
- spec->priority == priority && spec->flags == filter_flags) {
+ spec->priority == priority &&
+ !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
efx_filter_table_clear_entry(efx, table, filter_idx);
if (table->used == 0)
efx_filter_table_reset_search_depth(table);
@@ -668,7 +833,8 @@ int efx_filter_get_filter_safe(struct efx_nic *efx,
spin_lock_bh(&state->lock);
if (test_bit(filter_idx, table->used_bitmap) &&
- spec->priority == priority && spec->flags == filter_flags) {
+ spec->priority == priority &&
+ !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
*spec_buf = *spec;
rc = 0;
} else {
@@ -722,7 +888,7 @@ u32 efx_filter_count_rx_used(struct efx_nic *efx,
spin_lock_bh(&state->lock);
for (table_id = EFX_FILTER_TABLE_RX_IP;
- table_id <= EFX_FILTER_TABLE_RX_MAC;
+ table_id <= EFX_FILTER_TABLE_RX_DEF;
table_id++) {
table = &state->table[table_id];
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
@@ -750,7 +916,7 @@ s32 efx_filter_get_rx_ids(struct efx_nic *efx,
spin_lock_bh(&state->lock);
for (table_id = EFX_FILTER_TABLE_RX_IP;
- table_id <= EFX_FILTER_TABLE_RX_MAC;
+ table_id <= EFX_FILTER_TABLE_RX_DEF;
table_id++) {
table = &state->table[table_id];
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
@@ -785,6 +951,11 @@ void efx_restore_filters(struct efx_nic *efx)
for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
table = &state->table[table_id];
+
+ /* Check whether this is a regular register table */
+ if (table->step == 0)
+ continue;
+
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
if (!test_bit(filter_idx, table->used_bitmap))
continue;
@@ -794,7 +965,8 @@ void efx_restore_filters(struct efx_nic *efx)
}
}
- efx_filter_push_rx_limits(efx);
+ efx_filter_push_rx_config(efx);
+ efx_filter_push_tx_limits(efx);
spin_unlock_bh(&state->lock);
}
@@ -833,6 +1005,16 @@ int efx_probe_filters(struct efx_nic *efx)
table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
+
+ table = &state->table[EFX_FILTER_TABLE_RX_DEF];
+ table->id = EFX_FILTER_TABLE_RX_DEF;
+ table->size = EFX_FILTER_SIZE_RX_DEF;
+
+ table = &state->table[EFX_FILTER_TABLE_TX_MAC];
+ table->id = EFX_FILTER_TABLE_TX_MAC;
+ table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
+ table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
+ table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
}
for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
@@ -849,6 +1031,15 @@ int efx_probe_filters(struct efx_nic *efx)
goto fail;
}
+ if (state->table[EFX_FILTER_TABLE_RX_DEF].size) {
+ /* RX default filters must always exist */
+ unsigned i;
+ for (i = 0; i < EFX_FILTER_SIZE_RX_DEF; i++)
+ efx_filter_reset_rx_def(efx, i);
+ }
+
+ efx_filter_push_rx_config(efx);
+
return 0;
fail:
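The filter-ID scheme introduced above packs a match priority into the high bits and the table index into the low EFX_FILTER_INDEX_WIDTH bits, so IDs sort by how specific the filter is. A standalone model of the encode/decode round trip follows; the enum values mirror the hunk, everything else is a simplified stand-in.

/* Sketch: match_pri << INDEX_WIDTH | index, and its inverse. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum table_id { TABLE_RX_IP, TABLE_RX_MAC, TABLE_RX_DEF, TABLE_TX_MAC };

#define MATCH_PRI_RX_MAC_OVERRIDE_IP 0
#define MATCH_PRI_RX_DEF_OVERRIDE_IP 1
#define MATCH_PRI_NORMAL_BASE        2

#define INDEX_WIDTH 13
#define INDEX_MASK  ((1u << INDEX_WIDTH) - 1)

static uint32_t make_id(enum table_id table, unsigned int index, int override_ip)
{
	unsigned int match_pri = MATCH_PRI_NORMAL_BASE + table;

	if (override_ip && table == TABLE_RX_MAC)
		match_pri = MATCH_PRI_RX_MAC_OVERRIDE_IP;
	else if (override_ip && table == TABLE_RX_DEF)
		match_pri = MATCH_PRI_RX_DEF_OVERRIDE_IP;
	return match_pri << INDEX_WIDTH | index;
}

static unsigned int id_index(uint32_t id) { return id & INDEX_MASK; }

static enum table_id id_table(uint32_t id)
{
	unsigned int match_pri = id >> INDEX_WIDTH;

	if (match_pri == MATCH_PRI_RX_MAC_OVERRIDE_IP)
		return TABLE_RX_MAC;
	if (match_pri == MATCH_PRI_RX_DEF_OVERRIDE_IP)
		return TABLE_RX_DEF;
	return match_pri - MATCH_PRI_NORMAL_BASE;
}

int main(void)
{
	uint32_t id = make_id(TABLE_RX_MAC, 42, 1);

	assert(id_table(id) == TABLE_RX_MAC && id_index(id) == 42);
	/* Override-IP MAC filters get priority 0, so they report the lowest IDs. */
	assert(id < make_id(TABLE_RX_IP, 0, 0));
	printf("id=%#x table=%d index=%u\n", (unsigned)id, id_table(id), id_index(id));
	return 0;
}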
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 3d4108cd90ca..3c77802aed6c 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -20,6 +20,8 @@
* @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
* @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
* @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
+ * @EFX_FILTER_UC_DEF: Matching all otherwise unmatched unicast
+ * @EFX_FILTER_MC_DEF: Matching all otherwise unmatched multicast
* @EFX_FILTER_UNSPEC: Match type is unspecified
*
* Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
@@ -31,6 +33,8 @@ enum efx_filter_type {
EFX_FILTER_UDP_WILD,
EFX_FILTER_MAC_FULL = 4,
EFX_FILTER_MAC_WILD,
+ EFX_FILTER_UC_DEF = 8,
+ EFX_FILTER_MC_DEF,
EFX_FILTER_TYPE_COUNT, /* number of specific types */
EFX_FILTER_UNSPEC = 0xf,
};
@@ -39,7 +43,8 @@ enum efx_filter_type {
* enum efx_filter_priority - priority of a hardware filter specification
* @EFX_FILTER_PRI_HINT: Performance hint
* @EFX_FILTER_PRI_MANUAL: Manually configured filter
- * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour
+ * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
+ * networking and SR-IOV)
*/
enum efx_filter_priority {
EFX_FILTER_PRI_HINT = 0,
@@ -60,12 +65,14 @@ enum efx_filter_priority {
* any IP filter that matches the same packet. By default, IP
* filters take precedence.
* @EFX_FILTER_FLAG_RX: Filter is for RX
+ * @EFX_FILTER_FLAG_TX: Filter is for TX
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
+ EFX_FILTER_FLAG_TX = 0x10,
};
/**
@@ -103,6 +110,15 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
spec->dmaq_id = rxq_id;
}
+static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
+ unsigned txq_id)
+{
+ spec->type = EFX_FILTER_UNSPEC;
+ spec->priority = EFX_FILTER_PRI_REQUIRED;
+ spec->flags = EFX_FILTER_FLAG_TX;
+ spec->dmaq_id = txq_id;
+}
+
extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port);
extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
@@ -117,6 +133,8 @@ extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
u16 vid, const u8 *addr);
extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
u16 *vid, u8 *addr);
+extern int efx_filter_set_uc_def(struct efx_filter_spec *spec);
+extern int efx_filter_set_mc_def(struct efx_filter_spec *spec);
enum {
EFX_FILTER_VID_UNSPEC = 0xffff,
};
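The filter.h additions above give two-step spec construction: initialise an RX (or TX) spec for a queue, then narrow it to a concrete match type such as the new default-multicast filter, exactly as the ethtool ETHER_FLOW path does. A cut-down stand-in for struct efx_filter_spec and those helpers, using the enum values from the hunk:

/* Sketch: composing filter_init_rx() with a default-multicast match. */
#include <stdio.h>

enum { FILTER_UNSPEC = 0xf, FILTER_UC_DEF = 8, FILTER_MC_DEF = 9 };
enum { FLAG_RX_RSS = 0x01, FLAG_RX = 0x08, FLAG_TX = 0x10 };

struct filter_spec {
	unsigned int type;
	unsigned int flags;
	unsigned int dmaq_id;
};

static void filter_init_rx(struct filter_spec *spec, unsigned int flags,
			   unsigned int rxq_id)
{
	spec->type = FILTER_UNSPEC;
	spec->flags = FLAG_RX | flags;
	spec->dmaq_id = rxq_id;
}

static int filter_set_mc_def(struct filter_spec *spec)
{
	if (spec->type != FILTER_UNSPEC)
		return -1;	/* already specialised; cannot be combined */
	spec->type = FILTER_MC_DEF;
	return 0;
}

int main(void)
{
	struct filter_spec spec;

	filter_init_rx(&spec, FLAG_RX_RSS, 3);
	if (filter_set_mc_def(&spec))
		return 1;
	/* In the driver the spec would now go to efx_filter_insert_filter(). */
	printf("type=%u flags=%#x rxq=%u\n", spec.type, spec.flags, spec.dmaq_id);
	return 0;
}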
diff --git a/drivers/net/ethernet/sfc/mac.h b/drivers/net/ethernet/sfc/mac.h
deleted file mode 100644
index d6a255d0856b..000000000000
--- a/drivers/net/ethernet/sfc/mac.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_MAC_H
-#define EFX_MAC_H
-
-#include "net_driver.h"
-
-extern const struct efx_mac_operations falcon_xmac_operations;
-extern const struct efx_mac_operations efx_mcdi_mac_operations;
-extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
- u32 dma_len, int enable, int clear);
-
-#endif
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 81a425397468..17b6463e459c 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -22,22 +22,22 @@
**************************************************************************
*/
-/* Software-defined structure to the shared-memory */
-#define CMD_NOTIFY_PORT0 0
-#define CMD_NOTIFY_PORT1 4
-#define CMD_PDU_PORT0 0x008
-#define CMD_PDU_PORT1 0x108
-#define REBOOT_FLAG_PORT0 0x3f8
-#define REBOOT_FLAG_PORT1 0x3fc
-
#define MCDI_RPC_TIMEOUT 10 /*seconds */
#define MCDI_PDU(efx) \
- (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0)
+ (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
#define MCDI_DOORBELL(efx) \
- (efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0)
-#define MCDI_REBOOT_FLAG(efx) \
- (efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0)
+ (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
+#define MCDI_STATUS(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
+
+/* A reboot/assertion causes the MCDI status word to be set after the
+ * command word is set or a REBOOT event is sent. If we notice a reboot
+ * via these mechanisms then wait 10ms for the status word to be set. */
+#define MCDI_STATUS_DELAY_US 100
+#define MCDI_STATUS_DELAY_COUNT 100
+#define MCDI_STATUS_SLEEP_MS \
+ (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
#define SEQ_MASK \
EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
@@ -77,7 +77,7 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
u32 xflags, seqno;
BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
- BUG_ON(inlen & 3 || inlen >= 0x100);
+ BUG_ON(inlen & 3 || inlen >= MC_SMEM_PDU_LEN);
seqno = mcdi->seqno & SEQ_MASK;
xflags = 0;
@@ -111,7 +111,7 @@ static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
int i;
BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
- BUG_ON(outlen & 3 || outlen >= 0x100);
+ BUG_ON(outlen & 3 || outlen >= MC_SMEM_PDU_LEN);
for (i = 0; i < outlen; i += 4)
*((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
@@ -210,7 +210,7 @@ out:
/* Test and clear MC-rebooted flag for this port/function */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
- unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
+ unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
efx_dword_t reg;
uint32_t value;
@@ -384,6 +384,11 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
netif_dbg(efx, hw, efx->net_dev,
"MC command 0x%x inlen %d failed rc=%d\n",
cmd, (int)inlen, -rc);
+
+ if (rc == -EIO || rc == -EINTR) {
+ msleep(MCDI_STATUS_SLEEP_MS);
+ efx_mcdi_poll_reboot(efx);
+ }
}
efx_mcdi_release(mcdi);
@@ -465,10 +470,20 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
mcdi->resplen = 0;
++mcdi->credits;
}
- } else
+ } else {
+ int count;
+
/* Nobody was waiting for an MCDI request, so trigger a reset */
efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ /* Consume the status word since efx_mcdi_rpc_finish() won't */
+ for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
+ if (efx_mcdi_poll_reboot(efx))
+ break;
+ udelay(MCDI_STATUS_DELAY_US);
+ }
+ }
+
spin_unlock(&mcdi->iface_lock);
}
@@ -502,49 +517,6 @@ static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
efx_link_status_changed(efx);
}
-static const char *sensor_names[] = {
- [MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor",
- [MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor",
- [MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling",
- [MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor",
- [MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling",
- [MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor",
- [MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling",
- [MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor",
- [MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor",
- [MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor",
- [MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor",
- [MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor",
- [MC_CMD_SENSOR_IN_12V0] = "12V supply sensor"
-};
-
-static const char *sensor_status_names[] = {
- [MC_CMD_SENSOR_STATE_OK] = "OK",
- [MC_CMD_SENSOR_STATE_WARNING] = "Warning",
- [MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
- [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
-};
-
-static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
-{
- unsigned int monitor, state, value;
- const char *name, *state_txt;
- monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
- state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
- value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
- /* Deal gracefully with the board having more drivers than we
- * know about, but do not expect new sensor states. */
- name = (monitor >= ARRAY_SIZE(sensor_names))
- ? "No sensor name available" :
- sensor_names[monitor];
- EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
- state_txt = sensor_status_names[state];
-
- netif_err(efx, hw, efx->net_dev,
- "Sensor %d (%s) reports condition '%s' for raw value %d\n",
- monitor, name, state_txt, value);
-}
-
/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event)
@@ -588,6 +560,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_MAC_STATS_DMA:
/* MAC stats are gathered lazily. We can ignore this. */
break;
+ case MCDI_EVENT_CODE_FLR:
+ efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
+ break;
default:
netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
@@ -604,7 +579,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
- u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
+ u8 outbuf[ALIGN(MC_CMD_GET_VERSION_OUT_LEN, 4)];
size_t outlength;
const __le16 *ver_words;
int rc;
@@ -616,7 +591,7 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
if (rc)
goto fail;
- if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
+ if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
rc = -EIO;
goto fail;
}
@@ -663,9 +638,9 @@ fail:
}
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
- u16 *fw_subtype_list)
+ u16 *fw_subtype_list, u32 *capabilities)
{
- uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN];
+ uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN];
size_t outlen;
int port_num = efx_port_num(efx);
int offset;
@@ -678,7 +653,7 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
if (rc)
goto fail;
- if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
+ if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
rc = -EIO;
goto fail;
}
@@ -691,7 +666,16 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
if (fw_subtype_list)
memcpy(fw_subtype_list,
outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
- MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN);
+ MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM *
+ sizeof(fw_subtype_list[0]));
+ if (capabilities) {
+ if (port_num)
+ *capabilities = MCDI_DWORD(outbuf,
+ GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
+ else
+ *capabilities = MCDI_DWORD(outbuf,
+ GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
+ }
return 0;
@@ -779,7 +763,7 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
- (1 << MC_CMD_NVRAM_PROTECTED_LBN));
+ (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
return 0;
fail:
@@ -1060,7 +1044,7 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
int efx_mcdi_reset_port(struct efx_nic *efx)
{
- int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL);
+ int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
if (rc)
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
__func__, rc);
@@ -1173,6 +1157,37 @@ fail:
return rc;
}
+int efx_mcdi_flush_rxqs(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ __le32 *qid;
+ int rc, count;
+
+ qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
+ if (qid == NULL)
+ return -ENOMEM;
+
+ count = 0;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (rx_queue->flush_pending) {
+ rx_queue->flush_pending = false;
+ atomic_dec(&efx->rxq_flush_pending);
+ qid[count++] = cpu_to_le32(
+ efx_rx_queue_index(rx_queue));
+ }
+ }
+ }
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid,
+ count * sizeof(*qid), NULL, 0, NULL);
+ WARN_ON(rc > 0);
+
+ kfree(qid);
+
+ return rc;
+}
int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
{
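A quick check on the timing constants introduced in the mcdi.c hunk above: 100 us per poll times 100 polls is a 10 ms budget, which is also what MCDI_STATUS_SLEEP_MS evaluates to for the msleep() in the EIO/EINTR path. The loop below is a host-side stand-in for the bounded poll in efx_mcdi_ev_death(), with an invented stand-in for the status read.

/* Sketch: bounded status-word polling with a 10 ms budget. */
#include <assert.h>
#include <stdio.h>

#define STATUS_DELAY_US    100
#define STATUS_DELAY_COUNT 100
#define STATUS_SLEEP_MS    (STATUS_DELAY_US * STATUS_DELAY_COUNT / 1000)

/* Pretend the status word becomes readable on the Nth poll. */
static int poll_reboot(int *status, int ready_after)
{
	static int calls;
	return ++calls >= ready_after ? (*status = 1) : 0;
}

int main(void)
{
	int status = 0, count;

	assert(STATUS_SLEEP_MS == 10);	/* matches the "wait 10ms" comment */

	for (count = 0; count < STATUS_DELAY_COUNT; ++count)
		if (poll_reboot(&status, 7))
			break;		/* the driver udelay()s between polls */

	printf("status consumed after %d polls (budget %d ms)\n",
	       count + 1, STATUS_SLEEP_MS);
	return !status;
}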
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index aced2a7856fc..0bdf3e331832 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -56,6 +56,15 @@ struct efx_mcdi_iface {
size_t resplen;
};
+struct efx_mcdi_mon {
+ struct efx_buffer dma_buf;
+ struct mutex update_lock;
+ unsigned long last_update;
+ struct device *device;
+ struct efx_mcdi_mon_attribute *attrs;
+ unsigned int n_attrs;
+};
+
extern void efx_mcdi_init(struct efx_nic *efx);
extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
@@ -68,6 +77,7 @@ extern void efx_mcdi_mode_event(struct efx_nic *efx);
extern void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event);
+extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define MCDI_PTR2(_buf, _ofst) \
(((u8 *)_buf) + _ofst)
@@ -83,6 +93,10 @@ extern void efx_mcdi_process_event(struct efx_channel *channel,
#define MCDI_PTR(_buf, _ofst) \
MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST)
+#define MCDI_ARRAY_PTR(_buf, _field, _type, _index) \
+ MCDI_PTR2(_buf, \
+ MC_CMD_ ## _field ## _OFST + \
+ (_index) * MC_CMD_ ## _type ## _TYPEDEF_LEN)
#define MCDI_SET_DWORD(_buf, _ofst, _value) \
MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value)
#define MCDI_DWORD(_buf, _ofst) \
@@ -92,12 +106,18 @@ extern void efx_mcdi_process_event(struct efx_channel *channel,
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
+#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
+ EFX_DWORD_FIELD( \
+ *((efx_dword_t *) \
+ (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \
+ (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \
+ MC_CMD_ ## _type ## _TYPEDEF_ ## _field2)
extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
bool *was_attached_out);
extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
- u16 *fw_subtype_list);
+ u16 *fw_subtype_list, u32 *capabilities);
extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
u32 dest_evq);
extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
@@ -126,5 +146,19 @@ extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
+extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
+extern int efx_mcdi_set_mac(struct efx_nic *efx);
+extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
+ u32 dma_len, int enable, int clear);
+extern int efx_mcdi_mac_reconfigure(struct efx_nic *efx);
+extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+
+#ifdef CONFIG_SFC_MCDI_MON
+extern int efx_mcdi_mon_probe(struct efx_nic *efx);
+extern void efx_mcdi_mon_remove(struct efx_nic *efx);
+#else
+static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
+static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
+#endif
#endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c
index 50c20777a564..1003f309cba7 100644
--- a/drivers/net/ethernet/sfc/mcdi_mac.c
+++ b/drivers/net/ethernet/sfc/mcdi_mac.c
@@ -9,11 +9,10 @@
#include "net_driver.h"
#include "efx.h"
-#include "mac.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
-static int efx_mcdi_set_mac(struct efx_nic *efx)
+int efx_mcdi_set_mac(struct efx_nic *efx)
{
u32 reject, fcntl;
u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];
@@ -45,6 +44,8 @@ static int efx_mcdi_set_mac(struct efx_nic *efx)
}
if (efx->wanted_fc & EFX_FC_AUTO)
fcntl = MC_CMD_FCNTL_AUTO;
+ if (efx->fc_disable)
+ fcntl = MC_CMD_FCNTL_OFF;
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
@@ -52,7 +53,7 @@ static int efx_mcdi_set_mac(struct efx_nic *efx)
NULL, 0, NULL);
}
-static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults)
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
{
u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
size_t outlength;
@@ -62,16 +63,13 @@ static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults)
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), &outlength);
- if (rc)
- goto fail;
-
- *faults = MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT);
- return 0;
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
+ return true;
+ }
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
- return rc;
+ return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
}
int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
@@ -84,7 +82,7 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
u32 addr_hi;
u32 addr_lo;
- BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_LEN != 0);
+ BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
addr_lo = ((u64)dma_addr) >> 0;
addr_hi = ((u64)dma_addr) >> 32;
@@ -93,13 +91,13 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
EFX_POPULATE_DWORD_7(*cmd_ptr,
- MC_CMD_MAC_STATS_CMD_DMA, !!enable,
- MC_CMD_MAC_STATS_CMD_CLEAR, clear,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
- MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, !!enable,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
- MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT, 1,
- MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
+ MC_CMD_MAC_STATS_IN_DMA, !!enable,
+ MC_CMD_MAC_STATS_IN_CLEAR, clear,
+ MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE, 1,
+ MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
+ MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR, 0,
+ MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT, 1,
+ MC_CMD_MAC_STATS_IN_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
@@ -115,31 +113,18 @@ fail:
return rc;
}
-static int efx_mcdi_mac_reconfigure(struct efx_nic *efx)
+int efx_mcdi_mac_reconfigure(struct efx_nic *efx)
{
int rc;
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
rc = efx_mcdi_set_mac(efx);
if (rc != 0)
return rc;
- /* Restore the multicast hash registers. */
- efx->type->push_multicast_hash(efx);
-
- return 0;
-}
-
-
-static bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
-{
- u32 faults;
- int rc = efx_mcdi_get_mac_faults(efx, &faults);
- return (rc != 0) || (faults != 0);
+ return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
+ efx->multicast_hash.byte,
+ sizeof(efx->multicast_hash),
+ NULL, 0, NULL);
}
-
-
-const struct efx_mac_operations efx_mcdi_mac_operations = {
- .reconfigure = efx_mcdi_mac_reconfigure,
- .update_stats = efx_port_dummy_op_void,
- .check_fault = efx_mcdi_mac_check_fault,
-};
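The rewritten efx_mcdi_mac_check_fault() above folds the GET_LINK query and its interpretation together: if the request itself fails, it reports a fault rather than "no fault". A minimal stand-in for that fail-safe shape, with an invented query function:

/* Sketch: treat "could not ask" as "assume faulty". */
#include <stdbool.h>
#include <stdio.h>

static int query_mac_fault_word(unsigned int *faults)
{
	*faults = 0;		/* pretend the firmware reports no faults */
	return 0;		/* return non-zero here to simulate a query error */
}

static bool mac_check_fault(void)
{
	unsigned int faults;

	if (query_mac_fault_word(&faults))
		return true;	/* cannot ask, so assume the worst */
	return faults != 0;
}

int main(void)
{
	printf("mac fault: %s\n", mac_check_fault() ? "yes" : "no");
	return 0;
}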
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
new file mode 100644
index 000000000000..fb7f65b59eb8
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -0,0 +1,415 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/hwmon.h>
+#include <linux/stat.h>
+
+#include "net_driver.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+
+enum efx_hwmon_type {
+ EFX_HWMON_UNKNOWN,
+ EFX_HWMON_TEMP, /* temperature */
+ EFX_HWMON_COOL, /* cooling device, probably a heatsink */
+ EFX_HWMON_IN /* input voltage */
+};
+
+static const struct {
+ const char *label;
+ enum efx_hwmon_type hwmon_type;
+ int port;
+} efx_mcdi_sensor_type[MC_CMD_SENSOR_ENTRY_MAXNUM] = {
+#define SENSOR(name, label, hwmon_type, port) \
+ [MC_CMD_SENSOR_##name] = { label, hwmon_type, port }
+ SENSOR(CONTROLLER_TEMP, "Controller temp.", EFX_HWMON_TEMP, -1),
+ SENSOR(PHY_COMMON_TEMP, "PHY temp.", EFX_HWMON_TEMP, -1),
+ SENSOR(CONTROLLER_COOLING, "Controller cooling", EFX_HWMON_COOL, -1),
+ SENSOR(PHY0_TEMP, "PHY temp.", EFX_HWMON_TEMP, 0),
+ SENSOR(PHY0_COOLING, "PHY cooling", EFX_HWMON_COOL, 0),
+ SENSOR(PHY1_TEMP, "PHY temp.", EFX_HWMON_TEMP, 1),
+ SENSOR(PHY1_COOLING, "PHY cooling", EFX_HWMON_COOL, 1),
+ SENSOR(IN_1V0, "1.0V supply", EFX_HWMON_IN, -1),
+ SENSOR(IN_1V2, "1.2V supply", EFX_HWMON_IN, -1),
+ SENSOR(IN_1V8, "1.8V supply", EFX_HWMON_IN, -1),
+ SENSOR(IN_2V5, "2.5V supply", EFX_HWMON_IN, -1),
+ SENSOR(IN_3V3, "3.3V supply", EFX_HWMON_IN, -1),
+ SENSOR(IN_12V0, "12.0V supply", EFX_HWMON_IN, -1),
+ SENSOR(IN_1V2A, "1.2V analogue supply", EFX_HWMON_IN, -1),
+ SENSOR(IN_VREF, "ref. voltage", EFX_HWMON_IN, -1),
+#undef SENSOR
+};
+
+static const char *const sensor_status_names[] = {
+ [MC_CMD_SENSOR_STATE_OK] = "OK",
+ [MC_CMD_SENSOR_STATE_WARNING] = "Warning",
+ [MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
+ [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
+};
+
+void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+ unsigned int type, state, value;
+ const char *name = NULL, *state_txt;
+
+ type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
+ state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
+ value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
+
+ /* Deal gracefully with the board having more drivers than we
+ * know about, but do not expect new sensor states. */
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
+ name = efx_mcdi_sensor_type[type].label;
+ if (!name)
+ name = "No sensor name available";
+ EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
+ state_txt = sensor_status_names[state];
+
+ netif_err(efx, hw, efx->net_dev,
+ "Sensor %d (%s) reports condition '%s' for raw value %d\n",
+ type, name, state_txt, value);
+}
+
+#ifdef CONFIG_SFC_MCDI_MON
+
+struct efx_mcdi_mon_attribute {
+ struct device_attribute dev_attr;
+ unsigned int index;
+ unsigned int type;
+ unsigned int limit_value;
+ char name[12];
+};
+
+static int efx_mcdi_mon_update(struct efx_nic *efx)
+{
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+ u8 inbuf[MC_CMD_READ_SENSORS_IN_LEN];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_LO,
+ hwmon->dma_buf.dma_addr & 0xffffffff);
+ MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_HI,
+ (u64)hwmon->dma_buf.dma_addr >> 32);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+ if (rc == 0)
+ hwmon->last_update = jiffies;
+ return rc;
+}
+
+static ssize_t efx_mcdi_mon_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", KBUILD_MODNAME);
+}
+
+static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
+ efx_dword_t *entry)
+{
+ struct efx_nic *efx = dev_get_drvdata(dev);
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_READ_SENSORS_OUT_LEN != 0);
+
+ mutex_lock(&hwmon->update_lock);
+
+ /* Use cached value if last update was < 1 s ago */
+ if (time_before(jiffies, hwmon->last_update + HZ))
+ rc = 0;
+ else
+ rc = efx_mcdi_mon_update(efx);
+
+ /* Copy out the requested entry */
+ *entry = ((efx_dword_t *)hwmon->dma_buf.addr)[index];
+
+ mutex_unlock(&hwmon->update_lock);
+
+ return rc;
+}
+
+static ssize_t efx_mcdi_mon_show_value(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_mcdi_mon_attribute *mon_attr =
+ container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+ efx_dword_t entry;
+ unsigned int value;
+ int rc;
+
+ rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
+ if (rc)
+ return rc;
+
+ value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
+
+ /* Convert temperature from degrees to milli-degrees Celsius */
+ if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP)
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_mcdi_mon_attribute *mon_attr =
+ container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+ unsigned int value;
+
+ value = mon_attr->limit_value;
+
+ /* Convert temperature from degrees to milli-degrees Celsius */
+ if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP)
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t efx_mcdi_mon_show_alarm(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_mcdi_mon_attribute *mon_attr =
+ container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+ efx_dword_t entry;
+ int state;
+ int rc;
+
+ rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
+ if (rc)
+ return rc;
+
+ state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+ return sprintf(buf, "%d\n", state != MC_CMD_SENSOR_STATE_OK);
+}
+
+static ssize_t efx_mcdi_mon_show_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_mcdi_mon_attribute *mon_attr =
+ container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+ return sprintf(buf, "%s\n",
+ efx_mcdi_sensor_type[mon_attr->type].label);
+}
+
+static int
+efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
+ ssize_t (*reader)(struct device *,
+ struct device_attribute *, char *),
+ unsigned int index, unsigned int type,
+ unsigned int limit_value)
+{
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+ struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
+ int rc;
+
+ strlcpy(attr->name, name, sizeof(attr->name));
+ attr->index = index;
+ attr->type = type;
+ attr->limit_value = limit_value;
+ attr->dev_attr.attr.name = attr->name;
+ attr->dev_attr.attr.mode = S_IRUGO;
+ attr->dev_attr.show = reader;
+ rc = device_create_file(&efx->pci_dev->dev, &attr->dev_attr);
+ if (rc == 0)
+ ++hwmon->n_attrs;
+ return rc;
+}
+
+int efx_mcdi_mon_probe(struct efx_nic *efx)
+{
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+ unsigned int n_attrs, n_temp = 0, n_cool = 0, n_in = 0;
+ u8 outbuf[MC_CMD_SENSOR_INFO_OUT_LENMAX];
+ size_t outlen;
+ char name[12];
+ u32 mask;
+ int rc, i, type;
+
+ BUILD_BUG_ON(MC_CMD_SENSOR_INFO_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
+ return -EIO;
+
+ /* Find out which sensors are present. Don't create a device
+ * if there are none.
+ */
+ mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
+ if (mask == 0)
+ return 0;
+
+ /* Check again for short response */
+ if (outlen < MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask)))
+ return -EIO;
+
+ rc = efx_nic_alloc_buffer(efx, &hwmon->dma_buf,
+ 4 * MC_CMD_SENSOR_ENTRY_MAXNUM);
+ if (rc)
+ return rc;
+
+ mutex_init(&hwmon->update_lock);
+ efx_mcdi_mon_update(efx);
+
+ /* Allocate space for the maximum possible number of
+ * attributes for this set of sensors: name of the driver plus
+ * value, min, max, crit, alarm and label for each sensor.
+ */
+ n_attrs = 1 + 6 * hweight32(mask);
+ hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
+ if (!hwmon->attrs) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ hwmon->device = hwmon_device_register(&efx->pci_dev->dev);
+ if (IS_ERR(hwmon->device)) {
+ rc = PTR_ERR(hwmon->device);
+ goto fail;
+ }
+
+ rc = efx_mcdi_mon_add_attr(efx, "name", efx_mcdi_mon_show_name, 0, 0, 0);
+ if (rc)
+ goto fail;
+
+ for (i = 0, type = -1; ; i++) {
+ const char *hwmon_prefix;
+ unsigned hwmon_index;
+ u16 min1, max1, min2, max2;
+
+ /* Find next sensor type or exit if there is none */
+ type++;
+ while (!(mask & (1 << type))) {
+ type++;
+ if (type == 32)
+ return 0;
+ }
+
+ /* Skip sensors specific to a different port */
+ if (efx_mcdi_sensor_type[type].hwmon_type != EFX_HWMON_UNKNOWN &&
+ efx_mcdi_sensor_type[type].port >= 0 &&
+ efx_mcdi_sensor_type[type].port != efx_port_num(efx))
+ continue;
+
+ switch (efx_mcdi_sensor_type[type].hwmon_type) {
+ case EFX_HWMON_TEMP:
+ hwmon_prefix = "temp";
+ hwmon_index = ++n_temp; /* 1-based */
+ break;
+ case EFX_HWMON_COOL:
+ /* This is likely to be a heatsink, but there
+ * is no convention for representing cooling
+ * devices other than fans.
+ */
+ hwmon_prefix = "fan";
+ hwmon_index = ++n_cool; /* 1-based */
+ break;
+ default:
+ hwmon_prefix = "in";
+ hwmon_index = n_in++; /* 0-based */
+ break;
+ }
+
+ min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+ SENSOR_INFO_ENTRY, i, MIN1);
+ max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+ SENSOR_INFO_ENTRY, i, MAX1);
+ min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+ SENSOR_INFO_ENTRY, i, MIN2);
+ max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+ SENSOR_INFO_ENTRY, i, MAX2);
+
+ if (min1 != max1) {
+ snprintf(name, sizeof(name), "%s%u_input",
+ hwmon_prefix, hwmon_index);
+ rc = efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_value, i, type, 0);
+ if (rc)
+ goto fail;
+
+ snprintf(name, sizeof(name), "%s%u_min",
+ hwmon_prefix, hwmon_index);
+ rc = efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_limit,
+ i, type, min1);
+ if (rc)
+ goto fail;
+
+ snprintf(name, sizeof(name), "%s%u_max",
+ hwmon_prefix, hwmon_index);
+ rc = efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_limit,
+ i, type, max1);
+ if (rc)
+ goto fail;
+
+ if (min2 != max2) {
+ /* Assume max2 is critical value.
+ * But we have no good way to expose min2.
+ */
+ snprintf(name, sizeof(name), "%s%u_crit",
+ hwmon_prefix, hwmon_index);
+ rc = efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_limit,
+ i, type, max2);
+ if (rc)
+ goto fail;
+ }
+ }
+
+ snprintf(name, sizeof(name), "%s%u_alarm",
+ hwmon_prefix, hwmon_index);
+ rc = efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_alarm, i, type, 0);
+ if (rc)
+ goto fail;
+
+ if (efx_mcdi_sensor_type[type].label) {
+ snprintf(name, sizeof(name), "%s%u_label",
+ hwmon_prefix, hwmon_index);
+ rc = efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_label, i, type, 0);
+ if (rc)
+ goto fail;
+ }
+ }
+
+fail:
+ efx_mcdi_mon_remove(efx);
+ return rc;
+}
+
+void efx_mcdi_mon_remove(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct efx_mcdi_mon *hwmon = &nic_data->hwmon;
+ unsigned int i;
+
+ for (i = 0; i < hwmon->n_attrs; i++)
+ device_remove_file(&efx->pci_dev->dev,
+ &hwmon->attrs[i].dev_attr);
+ kfree(hwmon->attrs);
+ if (hwmon->device)
+ hwmon_device_unregister(hwmon->device);
+ efx_nic_free_buffer(efx, &hwmon->dma_buf);
+}
+
+#endif /* CONFIG_SFC_MCDI_MON */
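
For context on the hwmon interface added above: a minimal userspace sketch, not part of the patch, assuming a hypothetical /sys/class/hwmon/hwmon0/device path (the real hwmon index and device location depend on the system). It reads the temp1_input, temp1_max and temp1_alarm files that efx_mcdi_mon_probe() registers; temperature channels are reported in milli-degrees Celsius per the conversion in efx_mcdi_mon_show_value(), and the alarm file is 1 whenever the sensor state is not MC_CMD_SENSOR_STATE_OK.

/* Sketch: read one temperature channel exposed by the sfc MCDI hwmon code. */
#include <stdio.h>

static long read_attr(const char *path)
{
	FILE *f = fopen(path, "r");
	long v = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &v) != 1)
		v = -1;
	fclose(f);
	return v;
}

int main(void)
{
	/* Hypothetical path; the attributes live in the PCI device's sysfs
	 * directory, reachable through the hwmon class device link. */
	const char *base = "/sys/class/hwmon/hwmon0/device";
	char path[256];
	long input, max, alarm;

	snprintf(path, sizeof(path), "%s/temp1_input", base);
	input = read_attr(path);	/* milli-degrees Celsius */
	snprintf(path, sizeof(path), "%s/temp1_max", base);
	max = read_attr(path);		/* milli-degrees Celsius */
	snprintf(path, sizeof(path), "%s/temp1_alarm", base);
	alarm = read_attr(path);	/* 0 = OK, 1 = warning/fatal/broken */

	printf("temp1: %.1f C (max %.1f C, alarm %ld)\n",
	       input / 1000.0, max / 1000.0, alarm);
	return 0;
}

In practice lm-sensors discovers these files automatically; the sketch only illustrates the tempN/fanN/inN naming scheme built in efx_mcdi_mon_probe().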
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 41fe06fa0600..0310b9f08c9b 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -22,6 +22,18 @@
/* The Scheduler has started. */
#define MC_FW_STATE_SCHED (8)
+/* Siena MC shared memory offsets */
+/* The 'doorbell' addresses are hard-wired to alert the MC when written */
+#define MC_SMEM_P0_DOORBELL_OFST 0x000
+#define MC_SMEM_P1_DOORBELL_OFST 0x004
+/* The rest of these are firmware-defined */
+#define MC_SMEM_P0_PDU_OFST 0x008
+#define MC_SMEM_P1_PDU_OFST 0x108
+#define MC_SMEM_PDU_LEN 0x100
+#define MC_SMEM_P0_PTP_TIME_OFST 0x7f0
+#define MC_SMEM_P0_STATUS_OFST 0x7f8
+#define MC_SMEM_P1_STATUS_OFST 0x7fc
+
/* Values to be written to the per-port status dword in shared
* memory on reboot and assert */
#define MC_STATUS_DWORD_REBOOT (0xb007b007)
@@ -34,6 +46,8 @@
*/
#define MCDI_PCOL_VERSION 1
+/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
+
/**
* MCDI version 1
*
@@ -131,53 +145,6 @@
*/
#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
-#define MCDI_EVENT_DATA_LBN 0
-#define MCDI_EVENT_DATA_WIDTH 32
-#define MCDI_EVENT_CONT_LBN 32
-#define MCDI_EVENT_CONT_WIDTH 1
-#define MCDI_EVENT_LEVEL_LBN 33
-#define MCDI_EVENT_LEVEL_WIDTH 3
-#define MCDI_EVENT_LEVEL_INFO (0)
-#define MCDI_EVENT_LEVEL_WARN (1)
-#define MCDI_EVENT_LEVEL_ERR (2)
-#define MCDI_EVENT_LEVEL_FATAL (3)
-#define MCDI_EVENT_SRC_LBN 36
-#define MCDI_EVENT_SRC_WIDTH 8
-#define MCDI_EVENT_CODE_LBN 44
-#define MCDI_EVENT_CODE_WIDTH 8
-#define MCDI_EVENT_CODE_BADSSERT (1)
-#define MCDI_EVENT_CODE_PMNOTICE (2)
-#define MCDI_EVENT_CODE_CMDDONE (3)
-#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
-#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
-#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
-#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
-#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
-#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
-#define MCDI_EVENT_CODE_LINKCHANGE (4)
-#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
-#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
-#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
-#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
-#define MCDI_EVENT_LINKCHANGE_SPEED_100M 1
-#define MCDI_EVENT_LINKCHANGE_SPEED_1G 2
-#define MCDI_EVENT_LINKCHANGE_SPEED_10G 3
-#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
-#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
-#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
-#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
-#define MCDI_EVENT_CODE_SENSOREVT (5)
-#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
-#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
-#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
-#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
-#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
-#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
-#define MCDI_EVENT_CODE_SCHEDERR (6)
-#define MCDI_EVENT_CODE_REBOOT (7)
-#define MCDI_EVENT_CODE_MAC_STATS_DMA (8)
-#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
-#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
/* Non-existent command target */
#define MC_CMD_ERR_ENOENT 2
@@ -198,121 +165,24 @@
#define MC_CMD_ERR_CODE_OFST 0
+/* We define 8 "escape" commands to allow
+ for command number space extension */
+
+#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78
+#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79
+#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A
+#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B
+#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C
+#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D
+#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E
+#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F
+
+/* Vectors in the boot ROM */
+/* Point to the copycode entry point. */
+#define MC_BOOTROM_COPYCODE_VEC (0x7f4)
+/* Points to the recovery mode entry point. */
+#define MC_BOOTROM_NOFLASH_VEC (0x7f8)
-/* MC_CMD_READ32: (debug, variadic out)
- * Read multiple 32byte words from MC memory
- */
-#define MC_CMD_READ32 0x01
-#define MC_CMD_READ32_IN_LEN 8
-#define MC_CMD_READ32_IN_ADDR_OFST 0
-#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
-#define MC_CMD_READ32_OUT_LEN(_numwords) \
- (4 * (_numwords))
-#define MC_CMD_READ32_OUT_BUFFER_OFST 0
-
-/* MC_CMD_WRITE32: (debug, variadic in)
- * Write multiple 32byte words to MC memory
- */
-#define MC_CMD_WRITE32 0x02
-#define MC_CMD_WRITE32_IN_LEN(_numwords) (((_numwords) * 4) + 4)
-#define MC_CMD_WRITE32_IN_ADDR_OFST 0
-#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
-#define MC_CMD_WRITE32_OUT_LEN 0
-
-/* MC_CMD_COPYCODE: (debug)
- * Copy MC code between two locations and jump
- */
-#define MC_CMD_COPYCODE 0x03
-#define MC_CMD_COPYCODE_IN_LEN 16
-#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
-#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
-#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
-#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
-/* Control should return to the caller rather than jumping */
-#define MC_CMD_COPYCODE_JUMP_NONE 1
-#define MC_CMD_COPYCODE_OUT_LEN 0
-
-/* MC_CMD_SET_FUNC: (debug)
- * Select function for function-specific commands.
- */
-#define MC_CMD_SET_FUNC 0x04
-#define MC_CMD_SET_FUNC_IN_LEN 4
-#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
-#define MC_CMD_SET_FUNC_OUT_LEN 0
-
-/* MC_CMD_GET_BOOT_STATUS:
- * Get the instruction address from which the MC booted.
- */
-#define MC_CMD_GET_BOOT_STATUS 0x05
-#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
-#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
-#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
-#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
-/* Reboot caused by watchdog */
-#define MC_CMD_GET_BOOT_STATUS_FLAGS_WATCHDOG_LBN (0)
-#define MC_CMD_GET_BOOT_STATUS_FLAGS_WATCHDOG_WIDTH (1)
-/* MC booted from primary flash partition */
-#define MC_CMD_GET_BOOT_STATUS_FLAGS_PRIMARY_LBN (1)
-#define MC_CMD_GET_BOOT_STATUS_FLAGS_PRIMARY_WIDTH (1)
-/* MC booted from backup flash partition */
-#define MC_CMD_GET_BOOT_STATUS_FLAGS_BACKUP_LBN (2)
-#define MC_CMD_GET_BOOT_STATUS_FLAGS_BACKUP_WIDTH (1)
-
-/* MC_CMD_GET_ASSERTS: (debug, variadic out)
- * Get (and optionally clear) the current assertion status.
- *
- * Only OUT.GLOBAL_FLAGS is guaranteed to exist in the completion
- * payload. The other fields will only be present if
- * OUT.GLOBAL_FLAGS != NO_FAILS
- */
-#define MC_CMD_GET_ASSERTS 0x06
-#define MC_CMD_GET_ASSERTS_IN_LEN 4
-#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
-#define MC_CMD_GET_ASSERTS_OUT_LEN 140
-/* Assertion status flag */
-#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
-/*! No assertions have failed. */
-#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 1
-/*! A system-level assertion has failed. */
-#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 2
-/*! A thread-level assertion has failed. */
-#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 3
-/*! The system was reset by the watchdog. */
-#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 4
-/* Failing PC value */
-#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
-/* Saved GP regs */
-#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
-#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_LEN 124
-/* Failing thread address */
-#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
-
-/* MC_CMD_LOG_CTRL:
- * Determine the output stream for various events and messages
- */
-#define MC_CMD_LOG_CTRL 0x07
-#define MC_CMD_LOG_CTRL_IN_LEN 8
-#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
-#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART (1)
-#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ (2)
-#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
-#define MC_CMD_LOG_CTRL_OUT_LEN 0
-
-/* MC_CMD_GET_VERSION:
- * Get version information about the MC firmware
- */
-#define MC_CMD_GET_VERSION 0x08
-#define MC_CMD_GET_VERSION_IN_LEN 0
-#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
-#define MC_CMD_GET_VERSION_V1_OUT_LEN 32
-#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
-/* Reserved version number to indicate "any" version. */
-#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
-/* The version response of a boot ROM awaiting rescue */
-#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000
-#define MC_CMD_GET_VERSION_V1_OUT_PCOL_OFST 4
-/* 128bit mask of functions supported by the current firmware */
-#define MC_CMD_GET_VERSION_V1_OUT_SUPPORTED_FUNCS_OFST 8
/* The command set exported by the boot ROM (MCDI v0) */
#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
(1 << MC_CMD_READ32) | \
@@ -320,1456 +190,2214 @@
(1 << MC_CMD_COPYCODE) | \
(1 << MC_CMD_GET_VERSION), \
0, 0, 0 }
-#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
-/* Vectors in the boot ROM */
-/* Point to the copycode entry point. */
-#define MC_BOOTROM_COPYCODE_VEC (0x7f4)
-/* Points to the recovery mode entry point. */
-#define MC_BOOTROM_NOFLASH_VEC (0x7f8)
+#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
+ (MC_CMD_SENSOR_ENTRY_OFST + (_x))
+
+#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+
+/* MCDI_EVENT structuredef */
+#define MCDI_EVENT_LEN 8
+#define MCDI_EVENT_CONT_LBN 32
+#define MCDI_EVENT_CONT_WIDTH 1
+#define MCDI_EVENT_LEVEL_LBN 33
+#define MCDI_EVENT_LEVEL_WIDTH 3
+#define MCDI_EVENT_LEVEL_INFO 0x0 /* enum */
+#define MCDI_EVENT_LEVEL_WARN 0x1 /* enum */
+#define MCDI_EVENT_LEVEL_ERR 0x2 /* enum */
+#define MCDI_EVENT_LEVEL_FATAL 0x3 /* enum */
+#define MCDI_EVENT_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
+#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
+#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
+#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
+#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
+#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 /* enum */
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 /* enum */
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 /* enum */
+#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
+#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
+#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
+#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
+#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
+#define MCDI_EVENT_FWALERT_DATA_LBN 8
+#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
+#define MCDI_EVENT_FWALERT_REASON_LBN 0
+#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
+#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 /* enum */
+#define MCDI_EVENT_FLR_VF_LBN 0
+#define MCDI_EVENT_FLR_VF_WIDTH 8
+#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
+#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
+#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
+#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 /* enum */
+#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 /* enum */
+#define MCDI_EVENT_TX_ERR_2BIG 0x3 /* enum */
+#define MCDI_EVENT_TX_ERR_INFO_LBN 16
+#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
+#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
+#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
+#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
+#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 /* enum */
+#define MCDI_EVENT_PTP_ERR_FILTER 0x2 /* enum */
+#define MCDI_EVENT_PTP_ERR_FIFO 0x3 /* enum */
+#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 /* enum */
+#define MCDI_EVENT_DATA_LBN 0
+#define MCDI_EVENT_DATA_WIDTH 32
+#define MCDI_EVENT_SRC_LBN 36
+#define MCDI_EVENT_SRC_WIDTH 8
+#define MCDI_EVENT_EV_CODE_LBN 60
+#define MCDI_EVENT_EV_CODE_WIDTH 4
+#define MCDI_EVENT_CODE_LBN 44
+#define MCDI_EVENT_CODE_WIDTH 8
+#define MCDI_EVENT_CODE_BADSSERT 0x1 /* enum */
+#define MCDI_EVENT_CODE_PMNOTICE 0x2 /* enum */
+#define MCDI_EVENT_CODE_CMDDONE 0x3 /* enum */
+#define MCDI_EVENT_CODE_LINKCHANGE 0x4 /* enum */
+#define MCDI_EVENT_CODE_SENSOREVT 0x5 /* enum */
+#define MCDI_EVENT_CODE_SCHEDERR 0x6 /* enum */
+#define MCDI_EVENT_CODE_REBOOT 0x7 /* enum */
+#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 /* enum */
+#define MCDI_EVENT_CODE_FWALERT 0x9 /* enum */
+#define MCDI_EVENT_CODE_FLR 0xa /* enum */
+#define MCDI_EVENT_CODE_TX_ERR 0xb /* enum */
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */
+#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum */
+#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum */
+#define MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_DATA_LBN 0
+#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
+#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
+#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
+#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
+#define MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define MCDI_EVENT_TX_ERR_DATA_LBN 0
+#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_PTP_SECONDS_OFST 0
+#define MCDI_EVENT_PTP_SECONDS_LBN 0
+#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
+#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
+#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
+#define MCDI_EVENT_PTP_UUID_OFST 0
+#define MCDI_EVENT_PTP_UUID_LBN 0
+#define MCDI_EVENT_PTP_UUID_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_READ32
+ * Read multiple 32bit words from MC memory.
+ */
+#define MC_CMD_READ32 0x1
+
+/* MC_CMD_READ32_IN msgrequest */
+#define MC_CMD_READ32_IN_LEN 8
+#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+
+/* MC_CMD_READ32_OUT msgresponse */
+#define MC_CMD_READ32_OUT_LENMIN 4
+#define MC_CMD_READ32_OUT_LENMAX 252
+#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_WRITE32
+ * Write multiple 32bit words to MC memory.
+ */
+#define MC_CMD_WRITE32 0x2
+
+/* MC_CMD_WRITE32_IN msgrequest */
+#define MC_CMD_WRITE32_IN_LENMIN 8
+#define MC_CMD_WRITE32_IN_LENMAX 252
+#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
+#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
+#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62
+
+/* MC_CMD_WRITE32_OUT msgresponse */
+#define MC_CMD_WRITE32_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_COPYCODE
+ * Copy MC code between two locations and jump.
+ */
+#define MC_CMD_COPYCODE 0x3
+
+/* MC_CMD_COPYCODE_IN msgrequest */
+#define MC_CMD_COPYCODE_IN_LEN 16
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+#define MC_CMD_COPYCODE_JUMP_NONE 0x1 /* enum */
+
+/* MC_CMD_COPYCODE_OUT msgresponse */
+#define MC_CMD_COPYCODE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_FUNC
+ */
+#define MC_CMD_SET_FUNC 0x4
+
+/* MC_CMD_SET_FUNC_IN msgrequest */
+#define MC_CMD_SET_FUNC_IN_LEN 4
+#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+
+/* MC_CMD_SET_FUNC_OUT msgresponse */
+#define MC_CMD_SET_FUNC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOOT_STATUS
+ */
+#define MC_CMD_GET_BOOT_STATUS 0x5
+
+/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
+#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
+
+/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
+#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1
-/* Test execution limits */
-#define MC_TESTEXEC_VARIANT_COUNT 16
-#define MC_TESTEXEC_RESULT_COUNT 7
-/* MC_CMD_SET_TESTVARS: (debug, variadic in)
- * Write variant words for test.
- *
- * The user supplies a bitmap of the variants they wish to set.
- * They must ensure that IN.LEN >= 4 + 4 * ffs(BITMAP)
- */
-#define MC_CMD_SET_TESTVARS 0x09
-#define MC_CMD_SET_TESTVARS_IN_LEN(_numwords) \
- (4 + 4*(_numwords))
-#define MC_CMD_SET_TESTVARS_IN_ARGS_BITMAP_OFST 0
-/* Up to MC_TESTEXEC_VARIANT_COUNT of 32byte words start here */
-#define MC_CMD_SET_TESTVARS_IN_ARGS_BUFFER_OFST 4
-#define MC_CMD_SET_TESTVARS_OUT_LEN 0
-
-/* MC_CMD_GET_TESTRCS: (debug, variadic out)
- * Return result words from test.
- */
-#define MC_CMD_GET_TESTRCS 0x0a
-#define MC_CMD_GET_TESTRCS_IN_LEN 4
-#define MC_CMD_GET_TESTRCS_IN_NUMWORDS_OFST 0
-#define MC_CMD_GET_TESTRCS_OUT_LEN(_numwords) \
- (4 * (_numwords))
-#define MC_CMD_GET_TESTRCS_OUT_BUFFER_OFST 0
-
-/* MC_CMD_RUN_TEST: (debug)
- * Run the test exported by this firmware image
- */
-#define MC_CMD_RUN_TEST 0x0b
-#define MC_CMD_RUN_TEST_IN_LEN 0
-#define MC_CMD_RUN_TEST_OUT_LEN 0
-
-/* MC_CMD_CSR_READ32: (debug, variadic out)
- * Read 32bit words from the indirect memory map
- */
-#define MC_CMD_CSR_READ32 0x0c
-#define MC_CMD_CSR_READ32_IN_LEN 12
-#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
-#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
-#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
-#define MC_CMD_CSR_READ32_OUT_LEN(_numwords) \
- (((_numwords) * 4) + 4)
-/* IN.NUMWORDS of 32bit words start here */
-#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
-#define MC_CMD_CSR_READ32_OUT_IREG_STATUS_OFST(_numwords) \
- ((_numwords) * 4)
-
-/* MC_CMD_CSR_WRITE32: (debug, variadic in)
- * Write 32bit dwords to the indirect memory map
- */
-#define MC_CMD_CSR_WRITE32 0x0d
-#define MC_CMD_CSR_WRITE32_IN_LEN(_numwords) \
- (((_numwords) * 4) + 8)
-#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
-#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
-/* Multiple 32bit words of data to write start here */
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
-#define MC_CMD_CSR_WRITE32_OUT_LEN 4
-#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
-
-/* MC_CMD_JTAG_WORK: (debug, fpga only)
- * Process JTAG work buffer for RBF acceleration.
- *
- * Host: bit count, (up to) 32 words of data to clock out to JTAG
- * (bits 1,0=TMS,TDO for first bit; bits 3,2=TMS,TDO for second bit, etc.)
- * MC: bit count, (up to) 32 words of data clocked in from JTAG
- * (bit 0=TDI for first bit, bit 1=TDI for second bit, etc.; [31:16] unused)
+/***********************************/
+/* MC_CMD_GET_ASSERTS
+ * Get and clear any assertion status.
*/
-#define MC_CMD_JTAG_WORK 0x0e
+#define MC_CMD_GET_ASSERTS 0x6
-/* MC_CMD_STACKINFO: (debug, variadic out)
- * Get stack information
- *
- * Host: nothing
- * MC: (thread ptr, stack size, free space) for each thread in system
- */
-#define MC_CMD_STACKINFO 0x0f
+/* MC_CMD_GET_ASSERTS_IN msgrequest */
+#define MC_CMD_GET_ASSERTS_IN_LEN 4
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+
+/* MC_CMD_GET_ASSERTS_OUT msgresponse */
+#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 /* enum */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 /* enum */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 /* enum */
+#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 /* enum */
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
-/* MC_CMD_MDIO_READ:
- * MDIO register read
+
+/***********************************/
+/* MC_CMD_LOG_CTRL
+ * Configure the output stream for various events and messages.
+ */
+#define MC_CMD_LOG_CTRL 0x7
+
+/* MC_CMD_LOG_CTRL_IN msgrequest */
+#define MC_CMD_LOG_CTRL_IN_LEN 8
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 /* enum */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+
+/* MC_CMD_LOG_CTRL_OUT msgresponse */
+#define MC_CMD_LOG_CTRL_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VERSION
+ * Get version information about the MC firmware.
+ */
+#define MC_CMD_GET_VERSION 0x8
+
+/* MC_CMD_GET_VERSION_IN msgrequest */
+#define MC_CMD_GET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_VERSION_V0_OUT msgresponse */
+#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff /* enum */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000 /* enum */
+
+/* MC_CMD_GET_VERSION_OUT msgresponse */
+#define MC_CMD_GET_VERSION_OUT_LEN 32
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
+
+
+/***********************************/
+/* MC_CMD_GET_FPGAREG
+ * Read multiple bytes from PTP FPGA.
+ */
+#define MC_CMD_GET_FPGAREG 0x9
+
+/* MC_CMD_GET_FPGAREG_IN msgrequest */
+#define MC_CMD_GET_FPGAREG_IN_LEN 8
+#define MC_CMD_GET_FPGAREG_IN_ADDR_OFST 0
+#define MC_CMD_GET_FPGAREG_IN_NUMBYTES_OFST 4
+
+/* MC_CMD_GET_FPGAREG_OUT msgresponse */
+#define MC_CMD_GET_FPGAREG_OUT_LENMIN 1
+#define MC_CMD_GET_FPGAREG_OUT_LENMAX 255
+#define MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0
+#define MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1
+#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1
+#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 255
+
+
+/***********************************/
+/* MC_CMD_PUT_FPGAREG
+ * Write multiple bytes to PTP FPGA.
+ */
+#define MC_CMD_PUT_FPGAREG 0xa
+
+/* MC_CMD_PUT_FPGAREG_IN msgrequest */
+#define MC_CMD_PUT_FPGAREG_IN_LENMIN 5
+#define MC_CMD_PUT_FPGAREG_IN_LENMAX 255
+#define MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num))
+#define MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0
+#define MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4
+#define MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1
+#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1
+#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 251
+
+/* MC_CMD_PUT_FPGAREG_OUT msgresponse */
+#define MC_CMD_PUT_FPGAREG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PTP
+ * Perform PTP operation
+ */
+#define MC_CMD_PTP 0xb
+
+/* MC_CMD_PTP_IN msgrequest */
+#define MC_CMD_PTP_IN_LEN 1
+#define MC_CMD_PTP_IN_OP_OFST 0
+#define MC_CMD_PTP_IN_OP_LEN 1
+#define MC_CMD_PTP_OP_ENABLE 0x1 /* enum */
+#define MC_CMD_PTP_OP_DISABLE 0x2 /* enum */
+#define MC_CMD_PTP_OP_TRANSMIT 0x3 /* enum */
+#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 /* enum */
+#define MC_CMD_PTP_OP_STATUS 0x5 /* enum */
+#define MC_CMD_PTP_OP_ADJUST 0x6 /* enum */
+#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 /* enum */
+#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 /* enum */
+#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 /* enum */
+#define MC_CMD_PTP_OP_RESET_STATS 0xa /* enum */
+#define MC_CMD_PTP_OP_DEBUG 0xb /* enum */
+#define MC_CMD_PTP_OP_MAX 0xc /* enum */
+
+/* MC_CMD_PTP_IN_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_ENABLE_LEN 16
+#define MC_CMD_PTP_IN_CMD_OFST 0
+#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+#define MC_CMD_PTP_MODE_V1 0x0 /* enum */
+#define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */
+#define MC_CMD_PTP_MODE_V2 0x2 /* enum */
+#define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */
+
+/* MC_CMD_PTP_IN_DISABLE msgrequest */
+#define MC_CMD_PTP_IN_DISABLE_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
+#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
+#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 255
+#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 243
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_STATUS msgrequest */
+#define MC_CMD_PTP_IN_STATUS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
+#define MC_CMD_PTP_IN_ADJUST_BITS 0x28 /* enum */
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+
+/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16
+
+/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+
+/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
+#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_DEBUG msgrequest */
+#define MC_CMD_PTP_IN_DEBUG_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+
+/* MC_CMD_PTP_OUT msgresponse */
+#define MC_CMD_PTP_OUT_LEN 0
+
+/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
+#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+
+/* MC_CMD_PTP_OUT_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_STATUS_LEN 64
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+
+/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_MANF_SUCCESS 0x0 /* enum */
+#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1 /* enum */
+#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2 /* enum */
+#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3 /* enum */
+#define MC_CMD_PTP_MANF_OSCILLATOR 0x4 /* enum */
+#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5 /* enum */
+#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6 /* enum */
+#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7 /* enum */
+#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8 /* enum */
+#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9 /* enum */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+
+
+/***********************************/
+/* MC_CMD_CSR_READ32
+ * Read 32bit words from the indirect memory map.
+ */
+#define MC_CMD_CSR_READ32 0xc
+
+/* MC_CMD_CSR_READ32_IN msgrequest */
+#define MC_CMD_CSR_READ32_IN_LEN 12
+#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+
+/* MC_CMD_CSR_READ32_OUT msgresponse */
+#define MC_CMD_CSR_READ32_OUT_LENMIN 4
+#define MC_CMD_CSR_READ32_OUT_LENMAX 252
+#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_CSR_WRITE32
+ * Write 32bit dwords to the indirect memory map.
+ */
+#define MC_CMD_CSR_WRITE32 0xd
+
+/* MC_CMD_CSR_WRITE32_IN msgrequest */
+#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
+#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
+#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
+
+/* MC_CMD_CSR_WRITE32_OUT msgresponse */
+#define MC_CMD_CSR_WRITE32_OUT_LEN 4
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_STACKINFO
+ * Get stack information.
+ */
+#define MC_CMD_STACKINFO 0xf
+
+/* MC_CMD_STACKINFO_IN msgrequest */
+#define MC_CMD_STACKINFO_IN_LEN 0
+
+/* MC_CMD_STACKINFO_OUT msgresponse */
+#define MC_CMD_STACKINFO_OUT_LENMIN 12
+#define MC_CMD_STACKINFO_OUT_LENMAX 252
+#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_MDIO_READ
+ * MDIO register read.
*/
#define MC_CMD_MDIO_READ 0x10
-#define MC_CMD_MDIO_READ_IN_LEN 16
-#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
-#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
-#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
-#define MC_CMD_MDIO_READ_OUT_LEN 8
-#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
-#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
-
-/* MC_CMD_MDIO_WRITE:
- * MDIO register write
- */
-#define MC_CMD_MDIO_WRITE 0x11
-#define MC_CMD_MDIO_WRITE_IN_LEN 20
-#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
-#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
-#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
-#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
-#define MC_CMD_MDIO_WRITE_OUT_LEN 4
-#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
-/* By default all the MCDI MDIO operations perform clause45 mode.
- * If you want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
- */
-#define MC_CMD_MDIO_CLAUSE22 32
+/* MC_CMD_MDIO_READ_IN msgrequest */
+#define MC_CMD_MDIO_READ_IN_LEN 16
+#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define MC_CMD_MDIO_BUS_INTERNAL 0x0 /* enum */
+#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 /* enum */
+#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_CLAUSE22 0x20 /* enum */
+#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
-/* There are two MDIO buses: one for the internal PHY, and one for external
- * devices.
- */
-#define MC_CMD_MDIO_BUS_INTERNAL 0
-#define MC_CMD_MDIO_BUS_EXTERNAL 1
+/* MC_CMD_MDIO_READ_OUT msgresponse */
+#define MC_CMD_MDIO_READ_OUT_LEN 8
+#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+#define MC_CMD_MDIO_STATUS_GOOD 0x8 /* enum */
-/* The MDIO commands return the raw status bits from the MDIO block. A "good"
- * transaction should have the DONE bit set and all other bits clear.
+
+/***********************************/
+/* MC_CMD_MDIO_WRITE
+ * MDIO register write.
*/
-#define MC_CMD_MDIO_STATUS_GOOD 0x08
+#define MC_CMD_MDIO_WRITE 0x11
+/* MC_CMD_MDIO_WRITE_IN msgrequest */
+#define MC_CMD_MDIO_WRITE_IN_LEN 20
+#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
+/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+/* MC_CMD_MDIO_CLAUSE22 0x20 */
+#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
-/* MC_CMD_DBI_WRITE: (debug)
- * Write DBI register(s)
- *
- * Host: address, byte-enables (and VF selection, and cs2 flag),
- * value [,address ...]
- * MC: nothing
+/* MC_CMD_MDIO_WRITE_OUT msgresponse */
+#define MC_CMD_MDIO_WRITE_OUT_LEN 4
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
+
+
+/***********************************/
+/* MC_CMD_DBI_WRITE
+ * Write DBI register(s).
*/
#define MC_CMD_DBI_WRITE 0x12
-#define MC_CMD_DBI_WRITE_IN_LEN(_numwords) \
- (12 * (_numwords))
-#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(_word) \
- (((_word) * 12) + 0)
-#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(_word) \
- (((_word) * 12) + 4)
-#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(_word) \
- (((_word) * 12) + 8)
-#define MC_CMD_DBI_WRITE_OUT_LEN 0
-
-/* MC_CMD_DBI_READ: (debug)
- * Read DBI register(s)
- *
- * Host: address, [,address ...]
- * MC: value [,value ...]
- * (note: this does not support reading from VFs, but is retained for backwards
- * compatibility; see MC_CMD_DBI_READX below)
- */
-#define MC_CMD_DBI_READ 0x13
-#define MC_CMD_DBI_READ_IN_LEN(_numwords) \
- (4 * (_numwords))
-#define MC_CMD_DBI_READ_OUT_LEN(_numwords) \
- (4 * (_numwords))
-
-/* MC_CMD_PORT_READ32: (debug)
+
+/* MC_CMD_DBI_WRITE_IN msgrequest */
+#define MC_CMD_DBI_WRITE_IN_LENMIN 12
+#define MC_CMD_DBI_WRITE_IN_LENMAX 252
+#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
+
+/* MC_CMD_DBI_WRITE_OUT msgresponse */
+#define MC_CMD_DBI_WRITE_OUT_LEN 0
+
+/* MC_CMD_DBIWROP_TYPEDEF structuredef */
+#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_LBN 32
+#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_PORT_READ32
* Read a 32-bit register from the indirect port register map.
- *
- * The port to access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_READ32 0x14
-#define MC_CMD_PORT_READ32_IN_LEN 4
-#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
-#define MC_CMD_PORT_READ32_OUT_LEN 8
-#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
-#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
-/* MC_CMD_PORT_WRITE32: (debug)
+/* MC_CMD_PORT_READ32_IN msgrequest */
+#define MC_CMD_PORT_READ32_IN_LEN 4
+#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+
+/* MC_CMD_PORT_READ32_OUT msgresponse */
+#define MC_CMD_PORT_READ32_OUT_LEN 8
+#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE32
* Write a 32-bit register to the indirect port register map.
- *
- * The port to access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_WRITE32 0x15
-#define MC_CMD_PORT_WRITE32_IN_LEN 8
-#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
-#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
-#define MC_CMD_PORT_WRITE32_OUT_LEN 4
-#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
-
-/* MC_CMD_PORT_READ128: (debug)
- * Read a 128-bit register from indirect port register map
- *
- * The port to access is implied by the Shared memory channel used.
+
+/* MC_CMD_PORT_WRITE32_IN msgrequest */
+#define MC_CMD_PORT_WRITE32_IN_LEN 8
+#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+
+/* MC_CMD_PORT_WRITE32_OUT msgresponse */
+#define MC_CMD_PORT_WRITE32_OUT_LEN 4
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_PORT_READ128
+ * Read a 128-bit register from the indirect port register map.
*/
#define MC_CMD_PORT_READ128 0x16
-#define MC_CMD_PORT_READ128_IN_LEN 4
-#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
-#define MC_CMD_PORT_READ128_OUT_LEN 20
-#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
-#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
-
-/* MC_CMD_PORT_WRITE128: (debug)
- * Write a 128-bit register to indirect port register map.
- *
- * The port to access is implied by the Shared memory channel used.
+
+/* MC_CMD_PORT_READ128_IN msgrequest */
+#define MC_CMD_PORT_READ128_IN_LEN 4
+#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+
+/* MC_CMD_PORT_READ128_OUT msgresponse */
+#define MC_CMD_PORT_READ128_OUT_LEN 20
+#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE128
+ * Write a 128-bit register to the indirect port register map.
*/
#define MC_CMD_PORT_WRITE128 0x17
-#define MC_CMD_PORT_WRITE128_IN_LEN 20
-#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
-#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
-#define MC_CMD_PORT_WRITE128_OUT_LEN 4
-#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
-
-/* MC_CMD_GET_BOARD_CFG:
- * Returns the MC firmware configuration structure
- *
- * The FW_SUBTYPE_LIST contains a 16-bit value for each of the 12 types of
- * NVRAM area. The values are defined in the firmware/mc/platform/<xxx>.c file
- * for a specific board type, but otherwise have no meaning to the MC; they
- * are used by the driver to manage selection of appropriate firmware updates.
+
+/* MC_CMD_PORT_WRITE128_IN msgrequest */
+#define MC_CMD_PORT_WRITE128_IN_LEN 20
+#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
+
+/* MC_CMD_PORT_WRITE128_OUT msgresponse */
+#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOARD_CFG
+ * Returns the MC firmware configuration structure.
*/
#define MC_CMD_GET_BOARD_CFG 0x18
-#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
-#define MC_CMD_GET_BOARD_CFG_OUT_LEN 96
-#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
-#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
-#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
-#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
-#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
-#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
-#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
-#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
-#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
-#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
-#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
-#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
-#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
-#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
-#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 24
-
-/* MC_CMD_DBI_READX: (debug)
- * Read DBI register(s) -- extended functionality
- *
- * Host: vf selection, address, [,vf selection ...]
- * MC: value [,value ...]
+
+/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
+#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
+
+/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
+#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0x0 /* enum */
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 0x1 /* enum */
+#define MC_CMD_CAPABILITIES_TURBO_LBN 0x1 /* enum */
+#define MC_CMD_CAPABILITIES_TURBO_WIDTH 0x1 /* enum */
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 0x2 /* enum */
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 0x1 /* enum */
+#define MC_CMD_CAPABILITIES_PTP_LBN 0x3 /* enum */
+#define MC_CMD_CAPABILITIES_PTP_WIDTH 0x1 /* enum */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+/* Enum values, see field(s): */
+/* CAPABILITIES_PORT0 */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32
+
+
+/***********************************/
+/* MC_CMD_DBI_READX
+ * Read DBI register(s).
*/
#define MC_CMD_DBI_READX 0x19
-#define MC_CMD_DBI_READX_IN_LEN(_numwords) \
- (8*(_numwords))
-#define MC_CMD_DBI_READX_OUT_LEN(_numwords) \
- (4*(_numwords))
-/* MC_CMD_SET_RAND_SEED:
- * Set the 16byte seed for the MC pseudo-random generator
+/* MC_CMD_DBI_READX_IN msgrequest */
+#define MC_CMD_DBI_READX_IN_LENMIN 8
+#define MC_CMD_DBI_READX_IN_LENMAX 248
+#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
+
+/* MC_CMD_DBI_READX_OUT msgresponse */
+#define MC_CMD_DBI_READX_OUT_LENMIN 4
+#define MC_CMD_DBI_READX_OUT_LENMAX 252
+#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
+#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
+#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
+#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_SET_RAND_SEED
+ * Set the 16byte seed for the MC pseudo-random generator.
*/
#define MC_CMD_SET_RAND_SEED 0x1a
-#define MC_CMD_SET_RAND_SEED_IN_LEN 16
-#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
-#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
-/* MC_CMD_LTSSM_HIST: (debug)
- * Retrieve the history of the LTSSM, if the build supports it.
- *
- * Host: nothing
- * MC: variable number of LTSSM values, as bytes
- * The history is read-to-clear.
+/* MC_CMD_SET_RAND_SEED_IN msgrequest */
+#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
+#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
+
+/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
+#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LTSSM_HIST
+ * Retrieve the history of the PCIE LTSSM.
*/
#define MC_CMD_LTSSM_HIST 0x1b
-/* MC_CMD_DRV_ATTACH:
- * Inform MCPU that this port is managed on the host (i.e. driver active)
+/* MC_CMD_LTSSM_HIST_IN msgrequest */
+#define MC_CMD_LTSSM_HIST_IN_LEN 0
+
+/* MC_CMD_LTSSM_HIST_OUT msgresponse */
+#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
+#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
+#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_DRV_ATTACH
+ * Inform MCPU that this port is managed on the host.
*/
#define MC_CMD_DRV_ATTACH 0x1c
-#define MC_CMD_DRV_ATTACH_IN_LEN 8
-#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
-#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
-#define MC_CMD_DRV_ATTACH_OUT_LEN 4
-#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
-/* MC_CMD_NCSI_PROD: (debug)
- * Trigger an NC-SI event (and possibly an AEN in response)
+/* MC_CMD_DRV_ATTACH_IN msgrequest */
+#define MC_CMD_DRV_ATTACH_IN_LEN 8
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+
+/* MC_CMD_DRV_ATTACH_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_OUT_LEN 4
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_NCSI_PROD
+ * Trigger an NC-SI event.
*/
#define MC_CMD_NCSI_PROD 0x1d
-#define MC_CMD_NCSI_PROD_IN_LEN 4
-#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0
-#define MC_CMD_NCSI_PROD_LINKCHANGE_LBN 0
-#define MC_CMD_NCSI_PROD_LINKCHANGE_WIDTH 1
-#define MC_CMD_NCSI_PROD_RESET_LBN 1
-#define MC_CMD_NCSI_PROD_RESET_WIDTH 1
-#define MC_CMD_NCSI_PROD_DRVATTACH_LBN 2
-#define MC_CMD_NCSI_PROD_DRVATTACH_WIDTH 1
-#define MC_CMD_NCSI_PROD_OUT_LEN 0
-
-/* Enumeration */
-#define MC_CMD_NCSI_PROD_LINKCHANGE 0
-#define MC_CMD_NCSI_PROD_RESET 1
-#define MC_CMD_NCSI_PROD_DRVATTACH 2
-
-/* MC_CMD_DEVEL: (debug)
- * Reserved for development
- */
-#define MC_CMD_DEVEL 0x1e
-
-/* MC_CMD_SHMUART: (debug)
+
+/* MC_CMD_NCSI_PROD_IN msgrequest */
+#define MC_CMD_NCSI_PROD_IN_LEN 4
+#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0
+#define MC_CMD_NCSI_PROD_LINKCHANGE 0x0 /* enum */
+#define MC_CMD_NCSI_PROD_RESET 0x1 /* enum */
+#define MC_CMD_NCSI_PROD_DRVATTACH 0x2 /* enum */
+#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_LBN 0
+#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_WIDTH 1
+#define MC_CMD_NCSI_PROD_IN_RESET_LBN 1
+#define MC_CMD_NCSI_PROD_IN_RESET_WIDTH 1
+#define MC_CMD_NCSI_PROD_IN_DRVATTACH_LBN 2
+#define MC_CMD_NCSI_PROD_IN_DRVATTACH_WIDTH 1
+
+/* MC_CMD_NCSI_PROD_OUT msgresponse */
+#define MC_CMD_NCSI_PROD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SHMUART
* Route UART output to circular buffer in shared memory instead.
*/
#define MC_CMD_SHMUART 0x1f
-#define MC_CMD_SHMUART_IN_FLAG_OFST 0
-#define MC_CMD_SHMUART_IN_LEN 4
-#define MC_CMD_SHMUART_OUT_LEN 0
-/* MC_CMD_PORT_RESET:
- * Generic per-port reset. There is no equivalent for per-board reset.
- *
- * Locks required: None
- * Return code: 0, ETIME
- */
-#define MC_CMD_PORT_RESET 0x20
-#define MC_CMD_PORT_RESET_IN_LEN 0
-#define MC_CMD_PORT_RESET_OUT_LEN 0
-
-/* MC_CMD_RESOURCE_LOCK:
- * Generic resource lock/unlock interface.
- *
- * Locks required: None
- * Return code: 0,
- * EBUSY (if trylock is contended by other port),
- * EDEADLK (if trylock is already acquired by this port)
- * EINVAL (if unlock doesn't own the lock)
- */
-#define MC_CMD_RESOURCE_LOCK 0x21
-#define MC_CMD_RESOURCE_LOCK_IN_LEN 8
-#define MC_CMD_RESOURCE_LOCK_IN_ACTION_OFST 0
-#define MC_CMD_RESOURCE_LOCK_ACTION_TRYLOCK 1
-#define MC_CMD_RESOURCE_LOCK_ACTION_UNLOCK 0
-#define MC_CMD_RESOURCE_LOCK_IN_RESOURCE_OFST 4
-#define MC_CMD_RESOURCE_LOCK_I2C 2
-#define MC_CMD_RESOURCE_LOCK_PHY 3
-#define MC_CMD_RESOURCE_LOCK_OUT_LEN 0
-
-/* MC_CMD_SPI_COMMAND: (variadic in, variadic out)
- * Read/Write to/from the SPI device.
- *
- * Locks required: SPI_LOCK
- * Return code: 0, ETIME, EINVAL, EACCES (if SPI_LOCK is not held)
- */
-#define MC_CMD_SPI_COMMAND 0x22
-#define MC_CMD_SPI_COMMAND_IN_LEN(_write_bytes) (12 + (_write_bytes))
-#define MC_CMD_SPI_COMMAND_IN_ARGS_OFST 0
-#define MC_CMD_SPI_COMMAND_IN_ARGS_ADDRESS_OFST 0
-#define MC_CMD_SPI_COMMAND_IN_ARGS_READ_BYTES_OFST 4
-#define MC_CMD_SPI_COMMAND_IN_ARGS_CHIP_SELECT_OFST 8
-/* Data to write here */
-#define MC_CMD_SPI_COMMAND_IN_WRITE_BUFFER_OFST 12
-#define MC_CMD_SPI_COMMAND_OUT_LEN(_read_bytes) (_read_bytes)
-/* Data read here */
-#define MC_CMD_SPI_COMMAND_OUT_READ_BUFFER_OFST 0
-
-/* MC_CMD_I2C_READ_WRITE: (variadic in, variadic out)
- * Read/Write to/from the I2C bus.
- *
- * Locks required: I2C_LOCK
- * Return code: 0, ETIME, EINVAL, EACCES (if I2C_LOCK is not held)
- */
-#define MC_CMD_I2C_RW 0x23
-#define MC_CMD_I2C_RW_IN_LEN(_write_bytes) (8 + (_write_bytes))
-#define MC_CMD_I2C_RW_IN_ARGS_OFST 0
-#define MC_CMD_I2C_RW_IN_ARGS_ADDR_OFST 0
-#define MC_CMD_I2C_RW_IN_ARGS_READ_BYTES_OFST 4
-/* Data to write here */
-#define MC_CMD_I2C_RW_IN_WRITE_BUFFER_OFSET 8
-#define MC_CMD_I2C_RW_OUT_LEN(_read_bytes) (_read_bytes)
-/* Data read here */
-#define MC_CMD_I2C_RW_OUT_READ_BUFFER_OFST 0
-
-/* Generic phy capability bitmask */
-#define MC_CMD_PHY_CAP_10HDX_LBN 1
-#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
-#define MC_CMD_PHY_CAP_10FDX_LBN 2
-#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
-#define MC_CMD_PHY_CAP_100HDX_LBN 3
-#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
-#define MC_CMD_PHY_CAP_100FDX_LBN 4
-#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
-#define MC_CMD_PHY_CAP_1000HDX_LBN 5
-#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
-#define MC_CMD_PHY_CAP_1000FDX_LBN 6
-#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
-#define MC_CMD_PHY_CAP_10000FDX_LBN 7
-#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
-#define MC_CMD_PHY_CAP_PAUSE_LBN 8
-#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
-#define MC_CMD_PHY_CAP_ASYM_LBN 9
-#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
-#define MC_CMD_PHY_CAP_AN_LBN 10
-#define MC_CMD_PHY_CAP_AN_WIDTH 1
-
-/* Generic loopback enumeration */
-#define MC_CMD_LOOPBACK_NONE 0
-#define MC_CMD_LOOPBACK_DATA 1
-#define MC_CMD_LOOPBACK_GMAC 2
-#define MC_CMD_LOOPBACK_XGMII 3
-#define MC_CMD_LOOPBACK_XGXS 4
-#define MC_CMD_LOOPBACK_XAUI 5
-#define MC_CMD_LOOPBACK_GMII 6
-#define MC_CMD_LOOPBACK_SGMII 7
-#define MC_CMD_LOOPBACK_XGBR 8
-#define MC_CMD_LOOPBACK_XFI 9
-#define MC_CMD_LOOPBACK_XAUI_FAR 10
-#define MC_CMD_LOOPBACK_GMII_FAR 11
-#define MC_CMD_LOOPBACK_SGMII_FAR 12
-#define MC_CMD_LOOPBACK_XFI_FAR 13
-#define MC_CMD_LOOPBACK_GPHY 14
-#define MC_CMD_LOOPBACK_PHYXS 15
-#define MC_CMD_LOOPBACK_PCS 16
-#define MC_CMD_LOOPBACK_PMAPMD 17
-#define MC_CMD_LOOPBACK_XPORT 18
-#define MC_CMD_LOOPBACK_XGMII_WS 19
-#define MC_CMD_LOOPBACK_XAUI_WS 20
-#define MC_CMD_LOOPBACK_XAUI_WS_FAR 21
-#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 22
-#define MC_CMD_LOOPBACK_GMII_WS 23
-#define MC_CMD_LOOPBACK_XFI_WS 24
-#define MC_CMD_LOOPBACK_XFI_WS_FAR 25
-#define MC_CMD_LOOPBACK_PHYXS_WS 26
-
-/* Generic PHY statistics enumeration */
-#define MC_CMD_OUI 0
-#define MC_CMD_PMA_PMD_LINK_UP 1
-#define MC_CMD_PMA_PMD_RX_FAULT 2
-#define MC_CMD_PMA_PMD_TX_FAULT 3
-#define MC_CMD_PMA_PMD_SIGNAL 4
-#define MC_CMD_PMA_PMD_SNR_A 5
-#define MC_CMD_PMA_PMD_SNR_B 6
-#define MC_CMD_PMA_PMD_SNR_C 7
-#define MC_CMD_PMA_PMD_SNR_D 8
-#define MC_CMD_PCS_LINK_UP 9
-#define MC_CMD_PCS_RX_FAULT 10
-#define MC_CMD_PCS_TX_FAULT 11
-#define MC_CMD_PCS_BER 12
-#define MC_CMD_PCS_BLOCK_ERRORS 13
-#define MC_CMD_PHYXS_LINK_UP 14
-#define MC_CMD_PHYXS_RX_FAULT 15
-#define MC_CMD_PHYXS_TX_FAULT 16
-#define MC_CMD_PHYXS_ALIGN 17
-#define MC_CMD_PHYXS_SYNC 18
-#define MC_CMD_AN_LINK_UP 19
-#define MC_CMD_AN_COMPLETE 20
-#define MC_CMD_AN_10GBT_STATUS 21
-#define MC_CMD_CL22_LINK_UP 22
-#define MC_CMD_PHY_NSTATS 23
-
-/* MC_CMD_GET_PHY_CFG:
- * Report PHY configuration. This guarantees to succeed even if the PHY is in
- * a "zombie" state.
- *
- * Locks required: None
- * Return code: 0
+/* MC_CMD_SHMUART_IN msgrequest */
+#define MC_CMD_SHMUART_IN_LEN 4
+#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+
+/* MC_CMD_SHMUART_OUT msgresponse */
+#define MC_CMD_SHMUART_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ENTITY_RESET
+ * Generic per-port reset.
+ */
+#define MC_CMD_ENTITY_RESET 0x20
+
+/* MC_CMD_ENTITY_RESET_IN msgrequest */
+#define MC_CMD_ENTITY_RESET_IN_LEN 4
+#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
+
+/* MC_CMD_ENTITY_RESET_OUT msgresponse */
+#define MC_CMD_ENTITY_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_CREDITS
+ * Read instantaneous and minimum flow control thresholds.
+ */
+#define MC_CMD_PCIE_CREDITS 0x21
+
+/* MC_CMD_PCIE_CREDITS_IN msgrequest */
+#define MC_CMD_PCIE_CREDITS_IN_LEN 8
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+
+/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
+#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
+
+
+/***********************************/
+/* MC_CMD_RXD_MONITOR
+ * Get histogram of RX queue fill level.
+ */
+#define MC_CMD_RXD_MONITOR 0x22
+
+/* MC_CMD_RXD_MONITOR_IN msgrequest */
+#define MC_CMD_RXD_MONITOR_IN_LEN 12
+#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+
+/* MC_CMD_RXD_MONITOR_OUT msgresponse */
+#define MC_CMD_RXD_MONITOR_OUT_LEN 80
+#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+
+
+/***********************************/
+/* MC_CMD_PUTS
+ * puts(3) implementation over MCDI
+ */
+#define MC_CMD_PUTS 0x23
+
+/* MC_CMD_PUTS_IN msgrequest */
+#define MC_CMD_PUTS_IN_LENMIN 13
+#define MC_CMD_PUTS_IN_LENMAX 255
+#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
+#define MC_CMD_PUTS_IN_DEST_OFST 0
+#define MC_CMD_PUTS_IN_UART_LBN 0
+#define MC_CMD_PUTS_IN_UART_WIDTH 1
+#define MC_CMD_PUTS_IN_PORT_LBN 1
+#define MC_CMD_PUTS_IN_PORT_WIDTH 1
+#define MC_CMD_PUTS_IN_DHOST_OFST 4
+#define MC_CMD_PUTS_IN_DHOST_LEN 6
+#define MC_CMD_PUTS_IN_STRING_OFST 12
+#define MC_CMD_PUTS_IN_STRING_LEN 1
+#define MC_CMD_PUTS_IN_STRING_MINNUM 1
+#define MC_CMD_PUTS_IN_STRING_MAXNUM 243
+
+/* MC_CMD_PUTS_OUT msgresponse */
+#define MC_CMD_PUTS_OUT_LEN 0
+
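The _LENMIN/_LENMAX/_LEN(num) triple above is the pattern this header now uses for variable-length messages: _LEN(num) gives the payload size for num array elements, bounded by _LENMIN and _LENMAX, with _MINNUM/_MAXNUM limiting the element count. A small illustrative check of that arithmetic for MC_CMD_PUTS, assuming only the definitions above:

#include <stddef.h>

/* Illustrative only: payload length needed to send an n-byte string via
 * MC_CMD_PUTS, or 0 if it cannot fit in a single request. */
static size_t mc_cmd_puts_len(size_t n)
{
        if (n < MC_CMD_PUTS_IN_STRING_MINNUM ||
            n > MC_CMD_PUTS_IN_STRING_MAXNUM)
                return 0;
        /* 12 fixed header bytes (DEST dword, DHOST, padding) plus one
         * byte per string element. */
        return MC_CMD_PUTS_IN_LEN(n);
}

For example mc_cmd_puts_len(5) is 17, and the 243-byte string cap is exactly what makes MC_CMD_PUTS_IN_LENMAX come out at 255.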
+
+/***********************************/
+/* MC_CMD_GET_PHY_CFG
+ * Report PHY configuration.
*/
#define MC_CMD_GET_PHY_CFG 0x24
-#define MC_CMD_GET_PHY_CFG_IN_LEN 0
-#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
-
-#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_PHY_CFG_PRESENT_LBN 0
-#define MC_CMD_GET_PHY_CFG_PRESENT_WIDTH 1
-#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN 1
-#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_WIDTH 1
-#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN 2
-#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_WIDTH 1
-#define MC_CMD_GET_PHY_CFG_LOWPOWER_LBN 3
-#define MC_CMD_GET_PHY_CFG_LOWPOWER_WIDTH 1
-#define MC_CMD_GET_PHY_CFG_POWEROFF_LBN 4
-#define MC_CMD_GET_PHY_CFG_POWEROFF_WIDTH 1
-#define MC_CMD_GET_PHY_CFG_TXDIS_LBN 5
-#define MC_CMD_GET_PHY_CFG_TXDIS_WIDTH 1
-#define MC_CMD_GET_PHY_CFG_BIST_LBN 6
-#define MC_CMD_GET_PHY_CFG_BIST_WIDTH 1
-#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
-/* Bitmask of supported capabilities */
-#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
-#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
-#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
-/* PHY statistics bitmap */
-#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
-/* PHY type/name string */
-#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
-#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
-#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
-#define MC_CMD_MEDIA_XAUI 1
-#define MC_CMD_MEDIA_CX4 2
-#define MC_CMD_MEDIA_KX4 3
-#define MC_CMD_MEDIA_XFP 4
-#define MC_CMD_MEDIA_SFP_PLUS 5
-#define MC_CMD_MEDIA_BASE_T 6
-/* MDIO "MMDS" supported */
-#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
-/* Native clause 22 */
-#define MC_CMD_MMD_CLAUSE22 0
-#define MC_CMD_MMD_CLAUSE45_PMAPMD 1
-#define MC_CMD_MMD_CLAUSE45_WIS 2
-#define MC_CMD_MMD_CLAUSE45_PCS 3
-#define MC_CMD_MMD_CLAUSE45_PHYXS 4
-#define MC_CMD_MMD_CLAUSE45_DTEXS 5
-#define MC_CMD_MMD_CLAUSE45_TC 6
-#define MC_CMD_MMD_CLAUSE45_AN 7
-/* Clause22 proxied over clause45 by PHY */
-#define MC_CMD_MMD_CLAUSE45_C22EXT 29
-#define MC_CMD_MMD_CLAUSE45_VEND1 30
-#define MC_CMD_MMD_CLAUSE45_VEND2 31
-/* PHY stepping version */
-#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
-#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
-
-/* MC_CMD_START_BIST:
+/* MC_CMD_GET_PHY_CFG_IN msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+
+/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
+#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_PHY_CAP_10HDX_LBN 1
+#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10FDX_LBN 2
+#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100HDX_LBN 3
+#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100FDX_LBN 4
+#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000HDX_LBN 5
+#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000FDX_LBN 6
+#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10000FDX_LBN 7
+#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_PAUSE_LBN 8
+#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
+#define MC_CMD_PHY_CAP_ASYM_LBN 9
+#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
+#define MC_CMD_PHY_CAP_AN_LBN 10
+#define MC_CMD_PHY_CAP_AN_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define MC_CMD_MEDIA_XAUI 0x1 /* enum */
+#define MC_CMD_MEDIA_CX4 0x2 /* enum */
+#define MC_CMD_MEDIA_KX4 0x3 /* enum */
+#define MC_CMD_MEDIA_XFP 0x4 /* enum */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5 /* enum */
+#define MC_CMD_MEDIA_BASE_T 0x6 /* enum */
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define MC_CMD_MMD_CLAUSE22 0x0 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
+#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */
+#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
+#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
+#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d /* enum */
+#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
+#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
+
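The _LBN/_WIDTH pairs above describe bitfields within a dword: _LBN is the least significant bit number and _WIDTH the field width in bits. A minimal sketch of testing a capability bit, assuming the SUPPORTED_CAP dword has already been extracted from the response into host byte order:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: extract an LBN/WIDTH bitfield from a 32-bit word
 * (all widths used here are 1, so the shift cannot overflow). */
static uint32_t mcdi_field(uint32_t dword, unsigned int lbn, unsigned int width)
{
        return (dword >> lbn) & ((1u << width) - 1);
}

static bool phy_supports_10000fdx(uint32_t supported_cap)
{
        return mcdi_field(supported_cap,
                          MC_CMD_PHY_CAP_10000FDX_LBN,
                          MC_CMD_PHY_CAP_10000FDX_WIDTH) != 0;
}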
+
+/***********************************/
+/* MC_CMD_START_BIST
* Start a BIST test on the PHY.
- *
- * Locks required: PHY_LOCK if doing a PHY BIST
- * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
*/
#define MC_CMD_START_BIST 0x25
-#define MC_CMD_START_BIST_IN_LEN 4
-#define MC_CMD_START_BIST_IN_TYPE_OFST 0
-#define MC_CMD_START_BIST_OUT_LEN 0
-
-/* Run the PHY's short cable BIST */
-#define MC_CMD_PHY_BIST_CABLE_SHORT 1
-/* Run the PHY's long cable BIST */
-#define MC_CMD_PHY_BIST_CABLE_LONG 2
-/* Run BIST on the currently selected BPX Serdes (XAUI or XFI) */
-#define MC_CMD_BPX_SERDES_BIST 3
-/* Run the MC loopback tests */
-#define MC_CMD_MC_LOOPBACK_BIST 4
-/* Run the PHY's standard BIST */
-#define MC_CMD_PHY_BIST 5
-
-/* MC_CMD_POLL_PHY_BIST: (variadic output)
- * Poll for BIST completion
- *
- * Returns a single status code, and optionally some PHY specific
- * bist output. The driver should only consume the BIST output
- * after validating OUTLEN and PHY_CFG.PHY_TYPE.
- *
- * If a driver can't successfully parse the BIST output, it should
- * still respect the pass/Fail in OUT.RESULT
- *
- * Locks required: PHY_LOCK if doing a PHY BIST
- * Return code: 0, EACCES (if PHY_LOCK is not held)
+
+/* MC_CMD_START_BIST_IN msgrequest */
+#define MC_CMD_START_BIST_IN_LEN 4
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 /* enum */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2 /* enum */
+#define MC_CMD_BPX_SERDES_BIST 0x3 /* enum */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4 /* enum */
+#define MC_CMD_PHY_BIST 0x5 /* enum */
+
+/* MC_CMD_START_BIST_OUT msgresponse */
+#define MC_CMD_START_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_BIST
+ * Poll for BIST completion.
*/
#define MC_CMD_POLL_BIST 0x26
-#define MC_CMD_POLL_BIST_IN_LEN 0
-#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
-#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
-#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
-#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
-#define MC_CMD_POLL_BIST_RUNNING 1
-#define MC_CMD_POLL_BIST_PASSED 2
-#define MC_CMD_POLL_BIST_FAILED 3
-#define MC_CMD_POLL_BIST_TIMEOUT 4
-/* Generic: */
-#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
-/* SFT9001-specific: */
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2
-#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3
-#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 4
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 9
-/* mrsfp "PHY" driver: */
-#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
-#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 1
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 2
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 3
-#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 4
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 5
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 6
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 7
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 8
-
-/* MC_CMD_PHY_SPI: (variadic in, variadic out)
- * Read/Write/Erase the PHY SPI device
- *
- * Locks required: PHY_LOCK
- * Return code: 0, ETIME, EINVAL, EACCES (if PHY_LOCK is not held)
- */
-#define MC_CMD_PHY_SPI 0x27
-#define MC_CMD_PHY_SPI_IN_LEN(_write_bytes) (12 + (_write_bytes))
-#define MC_CMD_PHY_SPI_IN_ARGS_OFST 0
-#define MC_CMD_PHY_SPI_IN_ARGS_ADDR_OFST 0
-#define MC_CMD_PHY_SPI_IN_ARGS_READ_BYTES_OFST 4
-#define MC_CMD_PHY_SPI_IN_ARGS_ERASE_ALL_OFST 8
-/* Data to write here */
-#define MC_CMD_PHY_SPI_IN_WRITE_BUFFER_OFSET 12
-#define MC_CMD_PHY_SPI_OUT_LEN(_read_bytes) (_read_bytes)
-/* Data read here */
-#define MC_CMD_PHY_SPI_OUT_READ_BUFFER_OFST 0
-
-
-/* MC_CMD_GET_LOOPBACK_MODES:
- * Returns a bitmask of loopback modes evailable at each speed.
- *
- * Locks required: None
- * Return code: 0
+
+/* MC_CMD_POLL_BIST_IN msgrequest */
+#define MC_CMD_POLL_BIST_IN_LEN 0
+
+/* MC_CMD_POLL_BIST_OUT msgresponse */
+#define MC_CMD_POLL_BIST_OUT_LEN 8
+#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define MC_CMD_POLL_BIST_RUNNING 0x1 /* enum */
+#define MC_CMD_POLL_BIST_PASSED 0x2 /* enum */
+#define MC_CMD_POLL_BIST_FAILED 0x3 /* enum */
+#define MC_CMD_POLL_BIST_TIMEOUT 0x4 /* enum */
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+
+/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 /* enum */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2 /* enum */
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3 /* enum */
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4 /* enum */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 /* enum */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+
+/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 /* enum */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1 /* enum */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2 /* enum */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3 /* enum */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4 /* enum */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5 /* enum */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6 /* enum */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7 /* enum */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8 /* enum */
+
+
+/***********************************/
+/* MC_CMD_FLUSH_RX_QUEUES
+ * Flush receive queue(s).
+ */
+#define MC_CMD_FLUSH_RX_QUEUES 0x27
+
+/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
+
+/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
+#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LOOPBACK_MODES
+ * Get port's loopback modes.
*/
#define MC_CMD_GET_LOOPBACK_MODES 0x28
-#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
-#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32
-#define MC_CMD_GET_LOOPBACK_MODES_100M_OFST 0
-#define MC_CMD_GET_LOOPBACK_MODES_1G_OFST 8
-#define MC_CMD_GET_LOOPBACK_MODES_10G_OFST 16
-#define MC_CMD_GET_LOOPBACK_MODES_SUGGESTED_OFST 24
-
-/* Flow control enumeration */
-#define MC_CMD_FCNTL_OFF 0
-#define MC_CMD_FCNTL_RESPOND 1
-#define MC_CMD_FCNTL_BIDIR 2
-/* Auto - Use what the link has autonegotiated
- * - The driver should modify the advertised capabilities via SET_LINK.CAP
- * to control the negotiated flow control mode.
- * - Can only be set if the PHY supports PAUSE+ASYM capabilities
- * - Never returned by GET_LINK as the value programmed into the MAC
- */
-#define MC_CMD_FCNTL_AUTO 3
-
-/* Generic mac fault bitmask */
-#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
-#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
-#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
-#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
-#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
-#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
-
-/* MC_CMD_GET_LINK:
- * Read the unified MAC/PHY link state
- *
- * Locks required: None
- * Return code: 0, ETIME
+
+/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
+#define MC_CMD_LOOPBACK_NONE 0x0 /* enum */
+#define MC_CMD_LOOPBACK_DATA 0x1 /* enum */
+#define MC_CMD_LOOPBACK_GMAC 0x2 /* enum */
+#define MC_CMD_LOOPBACK_XGMII 0x3 /* enum */
+#define MC_CMD_LOOPBACK_XGXS 0x4 /* enum */
+#define MC_CMD_LOOPBACK_XAUI 0x5 /* enum */
+#define MC_CMD_LOOPBACK_GMII 0x6 /* enum */
+#define MC_CMD_LOOPBACK_SGMII 0x7 /* enum */
+#define MC_CMD_LOOPBACK_XGBR 0x8 /* enum */
+#define MC_CMD_LOOPBACK_XFI 0x9 /* enum */
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa /* enum */
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb /* enum */
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc /* enum */
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd /* enum */
+#define MC_CMD_LOOPBACK_GPHY 0xe /* enum */
+#define MC_CMD_LOOPBACK_PHYXS 0xf /* enum */
+#define MC_CMD_LOOPBACK_PCS 0x10 /* enum */
+#define MC_CMD_LOOPBACK_PMAPMD 0x11 /* enum */
+#define MC_CMD_LOOPBACK_XPORT 0x12 /* enum */
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13 /* enum */
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14 /* enum */
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 /* enum */
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 /* enum */
+#define MC_CMD_LOOPBACK_GMII_WS 0x17 /* enum */
+#define MC_CMD_LOOPBACK_XFI_WS 0x18 /* enum */
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 /* enum */
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a /* enum */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+
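Several fields above are 64-bit masks stored as LO/HI dword pairs (the 100M, 1G, 10G and SUGGESTED masks each have _LEN 8 plus _LO_OFST/_HI_OFST). A sketch of reassembling one of them and testing a mode, treating the loopback enum values as bit positions in the mask as the old "bitmask of loopback modes" description implies; the two dwords are assumed to have been read out of the response in host byte order:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: combine a LO/HI dword pair into a 64-bit mask. */
static uint64_t mcdi_qword(uint32_t lo, uint32_t hi)
{
        return ((uint64_t)hi << 32) | lo;
}

static bool loopback_supported(uint32_t lo, uint32_t hi, unsigned int mode)
{
        return (mcdi_qword(lo, hi) >> mode) & 1;
}

For example loopback_supported(lo_10g, hi_10g, MC_CMD_LOOPBACK_XAUI) would report whether XAUI loopback is offered at 10G.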
+
+/***********************************/
+/* MC_CMD_GET_LINK
+ * Read the unified MAC/PHY link state.
*/
#define MC_CMD_GET_LINK 0x29
-#define MC_CMD_GET_LINK_IN_LEN 0
-#define MC_CMD_GET_LINK_OUT_LEN 28
-/* near-side and link-partner advertised capabilities */
-#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
-#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
-/* Autonegotiated speed in mbit/s. The link may still be down
- * even if this reads non-zero */
-#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
-#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
-#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
-/* Whether we have overall link up */
-#define MC_CMD_GET_LINK_LINK_UP_LBN 0
-#define MC_CMD_GET_LINK_LINK_UP_WIDTH 1
-#define MC_CMD_GET_LINK_FULL_DUPLEX_LBN 1
-#define MC_CMD_GET_LINK_FULL_DUPLEX_WIDTH 1
-/* Whether we have link at the layers provided by the BPX */
-#define MC_CMD_GET_LINK_BPX_LINK_LBN 2
-#define MC_CMD_GET_LINK_BPX_LINK_WIDTH 1
-/* Whether the PHY has external link */
-#define MC_CMD_GET_LINK_PHY_LINK_LBN 3
-#define MC_CMD_GET_LINK_PHY_LINK_WIDTH 1
-#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
-#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
-
-/* MC_CMD_SET_LINK:
- * Write the unified MAC/PHY link configuration
- *
- * A loopback speed of "0" is supported, and means
- * (choose any available speed)
- *
- * Locks required: None
- * Return code: 0, EINVAL, ETIME
+
+/* MC_CMD_GET_LINK_IN msgrequest */
+#define MC_CMD_GET_LINK_IN_LEN 0
+
+/* MC_CMD_GET_LINK_OUT msgresponse */
+#define MC_CMD_GET_LINK_OUT_LEN 28
+#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define MC_CMD_FCNTL_OFF 0x0 /* enum */
+#define MC_CMD_FCNTL_RESPOND 0x1 /* enum */
+#define MC_CMD_FCNTL_BIDIR 0x2 /* enum */
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_SET_LINK
+ * Write the unified MAC/PHY link configuration.
*/
#define MC_CMD_SET_LINK 0x2a
-#define MC_CMD_SET_LINK_IN_LEN 16
-#define MC_CMD_SET_LINK_IN_CAP_OFST 0
-#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
-#define MC_CMD_SET_LINK_LOWPOWER_LBN 0
-#define MC_CMD_SET_LINK_LOWPOWER_WIDTH 1
-#define MC_CMD_SET_LINK_POWEROFF_LBN 1
-#define MC_CMD_SET_LINK_POWEROFF_WIDTH 1
-#define MC_CMD_SET_LINK_TXDIS_LBN 2
-#define MC_CMD_SET_LINK_TXDIS_WIDTH 1
-#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
-#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
-#define MC_CMD_SET_LINK_OUT_LEN 0
-
-/* MC_CMD_SET_ID_LED:
- * Set indentification LED state
- *
- * Locks required: None
- * Return code: 0, EINVAL
+
+/* MC_CMD_SET_LINK_IN msgrequest */
+#define MC_CMD_SET_LINK_IN_LEN 16
+#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+
+/* MC_CMD_SET_LINK_OUT msgresponse */
+#define MC_CMD_SET_LINK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_ID_LED
+ * Set identification LED state.
*/
#define MC_CMD_SET_ID_LED 0x2b
-#define MC_CMD_SET_ID_LED_IN_LEN 4
-#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
-#define MC_CMD_LED_OFF 0
-#define MC_CMD_LED_ON 1
-#define MC_CMD_LED_DEFAULT 2
-#define MC_CMD_SET_ID_LED_OUT_LEN 0
-
-/* MC_CMD_SET_MAC:
- * Set MAC configuration
- *
- * The MTU is the MTU programmed directly into the XMAC/GMAC
- * (inclusive of EtherII, VLAN, bug16011 padding)
- *
- * Locks required: None
- * Return code: 0, EINVAL
+
+/* MC_CMD_SET_ID_LED_IN msgrequest */
+#define MC_CMD_SET_ID_LED_IN_LEN 4
+#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define MC_CMD_LED_OFF 0x0 /* enum */
+#define MC_CMD_LED_ON 0x1 /* enum */
+#define MC_CMD_LED_DEFAULT 0x2 /* enum */
+
+/* MC_CMD_SET_ID_LED_OUT msgresponse */
+#define MC_CMD_SET_ID_LED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MAC
+ * Set MAC configuration.
*/
#define MC_CMD_SET_MAC 0x2c
-#define MC_CMD_SET_MAC_IN_LEN 24
-#define MC_CMD_SET_MAC_IN_MTU_OFST 0
-#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
-#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
-#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
-#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
-#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
-#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
-#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
-#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
-#define MC_CMD_SET_MAC_OUT_LEN 0
-
-/* MC_CMD_PHY_STATS:
- * Get generic PHY statistics
- *
- * This call returns the statistics for a generic PHY in a sparse
- * array (indexed by the enumerate). Each value is represented by
- * a 32bit number.
- *
- * If the DMA_ADDR is 0, then no DMA is performed, and the statistics
- * may be read directly out of shared memory. If DMA_ADDR != 0, then
- * the statistics are dmad to that (page-aligned location)
- *
- * Locks required: None
- * Returns: 0, ETIME
- * Response methods: shared memory, event
+
+/* MC_CMD_SET_MAC_IN msgrequest */
+#define MC_CMD_SET_MAC_IN_LEN 24
+#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+#define MC_CMD_FCNTL_AUTO 0x3 /* enum */
+
+/* MC_CMD_SET_MAC_OUT msgresponse */
+#define MC_CMD_SET_MAC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PHY_STATS
+ * Get generic PHY statistics.
*/
#define MC_CMD_PHY_STATS 0x2d
-#define MC_CMD_PHY_STATS_IN_LEN 8
-#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
-#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
-#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
-#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (MC_CMD_PHY_NSTATS * 4)
-
-/* Unified MAC statistics enumeration */
-#define MC_CMD_MAC_GENERATION_START 0
-#define MC_CMD_MAC_TX_PKTS 1
-#define MC_CMD_MAC_TX_PAUSE_PKTS 2
-#define MC_CMD_MAC_TX_CONTROL_PKTS 3
-#define MC_CMD_MAC_TX_UNICAST_PKTS 4
-#define MC_CMD_MAC_TX_MULTICAST_PKTS 5
-#define MC_CMD_MAC_TX_BROADCAST_PKTS 6
-#define MC_CMD_MAC_TX_BYTES 7
-#define MC_CMD_MAC_TX_BAD_BYTES 8
-#define MC_CMD_MAC_TX_LT64_PKTS 9
-#define MC_CMD_MAC_TX_64_PKTS 10
-#define MC_CMD_MAC_TX_65_TO_127_PKTS 11
-#define MC_CMD_MAC_TX_128_TO_255_PKTS 12
-#define MC_CMD_MAC_TX_256_TO_511_PKTS 13
-#define MC_CMD_MAC_TX_512_TO_1023_PKTS 14
-#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 15
-#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 16
-#define MC_CMD_MAC_TX_GTJUMBO_PKTS 17
-#define MC_CMD_MAC_TX_BAD_FCS_PKTS 18
-#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 19
-#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 20
-#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 21
-#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 22
-#define MC_CMD_MAC_TX_DEFERRED_PKTS 23
-#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 24
-#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 25
-#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 26
-#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 27
-#define MC_CMD_MAC_RX_PKTS 28
-#define MC_CMD_MAC_RX_PAUSE_PKTS 29
-#define MC_CMD_MAC_RX_GOOD_PKTS 30
-#define MC_CMD_MAC_RX_CONTROL_PKTS 31
-#define MC_CMD_MAC_RX_UNICAST_PKTS 32
-#define MC_CMD_MAC_RX_MULTICAST_PKTS 33
-#define MC_CMD_MAC_RX_BROADCAST_PKTS 34
-#define MC_CMD_MAC_RX_BYTES 35
-#define MC_CMD_MAC_RX_BAD_BYTES 36
-#define MC_CMD_MAC_RX_64_PKTS 37
-#define MC_CMD_MAC_RX_65_TO_127_PKTS 38
-#define MC_CMD_MAC_RX_128_TO_255_PKTS 39
-#define MC_CMD_MAC_RX_256_TO_511_PKTS 40
-#define MC_CMD_MAC_RX_512_TO_1023_PKTS 41
-#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 42
-#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 43
-#define MC_CMD_MAC_RX_GTJUMBO_PKTS 44
-#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 45
-#define MC_CMD_MAC_RX_BAD_FCS_PKTS 46
-#define MC_CMD_MAC_RX_OVERFLOW_PKTS 47
-#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 48
-#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 49
-#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 50
-#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 51
-#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 52
-#define MC_CMD_MAC_RX_JABBER_PKTS 53
-#define MC_CMD_MAC_RX_NODESC_DROPS 54
-#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 55
-#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 56
-#define MC_CMD_MAC_RX_LANES01_DISP_ERR 57
-#define MC_CMD_MAC_RX_LANES23_DISP_ERR 58
-#define MC_CMD_MAC_RX_MATCH_FAULT 59
-#define MC_CMD_GMAC_DMABUF_START 64
-#define MC_CMD_GMAC_DMABUF_END 95
-/* Insert new members here. */
-#define MC_CMD_MAC_GENERATION_END 96
-#define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1)
-
-/* MC_CMD_MAC_STATS:
- * Get unified GMAC/XMAC statistics
- *
- * This call returns unified statistics maintained by the MC as it
- * switches between the GMAC and XMAC. The MC will write out all
- * supported stats. The driver should zero initialise the buffer to
- * guarantee consistent results.
- *
- * Locks required: None
- * Returns: 0
- * Response methods: shared memory, event
- */
-#define MC_CMD_MAC_STATS 0x2e
-#define MC_CMD_MAC_STATS_IN_LEN 16
-#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
-#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
-#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
-#define MC_CMD_MAC_STATS_CMD_DMA_LBN 0
-#define MC_CMD_MAC_STATS_CMD_DMA_WIDTH 1
-#define MC_CMD_MAC_STATS_CMD_CLEAR_LBN 1
-#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1
-#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2
-#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1
-/* Remaining PERIOD* fields only relevant when PERIODIC_CHANGE is set */
-#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3
-#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1
-#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4
-#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1
-#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_LBN 5
-#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_WIDTH 1
-#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16
-#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16
-#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
-
-#define MC_CMD_MAC_STATS_OUT_LEN 0
-
-/* Callisto flags */
-#define MC_CMD_SFT9001_ROBUST_LBN 0
-#define MC_CMD_SFT9001_ROBUST_WIDTH 1
-#define MC_CMD_SFT9001_SHORT_REACH_LBN 1
-#define MC_CMD_SFT9001_SHORT_REACH_WIDTH 1
-
-/* MC_CMD_SFT9001_GET:
- * Read current callisto specific setting
- *
- * Locks required: None
- * Returns: 0, ETIME
- */
-#define MC_CMD_SFT9001_GET 0x30
-#define MC_CMD_SFT9001_GET_IN_LEN 0
-#define MC_CMD_SFT9001_GET_OUT_LEN 4
-#define MC_CMD_SFT9001_GET_OUT_FLAGS_OFST 0
-/* MC_CMD_SFT9001_SET:
- * Write current callisto specific setting
- *
- * Locks required: None
- * Returns: 0, ETIME, EINVAL
+/* MC_CMD_PHY_STATS_IN msgrequest */
+#define MC_CMD_PHY_STATS_IN_LEN 8
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3)
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
+#define MC_CMD_OUI 0x0 /* enum */
+#define MC_CMD_PMA_PMD_LINK_UP 0x1 /* enum */
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2 /* enum */
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3 /* enum */
+#define MC_CMD_PMA_PMD_SIGNAL 0x4 /* enum */
+#define MC_CMD_PMA_PMD_SNR_A 0x5 /* enum */
+#define MC_CMD_PMA_PMD_SNR_B 0x6 /* enum */
+#define MC_CMD_PMA_PMD_SNR_C 0x7 /* enum */
+#define MC_CMD_PMA_PMD_SNR_D 0x8 /* enum */
+#define MC_CMD_PCS_LINK_UP 0x9 /* enum */
+#define MC_CMD_PCS_RX_FAULT 0xa /* enum */
+#define MC_CMD_PCS_TX_FAULT 0xb /* enum */
+#define MC_CMD_PCS_BER 0xc /* enum */
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd /* enum */
+#define MC_CMD_PHYXS_LINK_UP 0xe /* enum */
+#define MC_CMD_PHYXS_RX_FAULT 0xf /* enum */
+#define MC_CMD_PHYXS_TX_FAULT 0x10 /* enum */
+#define MC_CMD_PHYXS_ALIGN 0x11 /* enum */
+#define MC_CMD_PHYXS_SYNC 0x12 /* enum */
+#define MC_CMD_AN_LINK_UP 0x13 /* enum */
+#define MC_CMD_AN_COMPLETE 0x14 /* enum */
+#define MC_CMD_AN_10GBT_STATUS 0x15 /* enum */
+#define MC_CMD_CL22_LINK_UP 0x16 /* enum */
+#define MC_CMD_PHY_NSTATS 0x17 /* enum */
+
+
+/***********************************/
+/* MC_CMD_MAC_STATS
+ * Get generic MAC statistics.
*/
-#define MC_CMD_SFT9001_SET 0x31
-#define MC_CMD_SFT9001_SET_IN_LEN 4
-#define MC_CMD_SFT9001_SET_IN_FLAGS_OFST 0
-#define MC_CMD_SFT9001_SET_OUT_LEN 0
-
+#define MC_CMD_MAC_STATS 0x2e
-/* MC_CMD_WOL_FILTER_SET:
- * Set a WoL filter
- *
- * Locks required: None
- * Returns: 0, EBUSY, EINVAL, ENOSYS
+/* MC_CMD_MAC_STATS_IN msgrequest */
+#define MC_CMD_MAC_STATS_IN_LEN 16
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+
+/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
+#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
+#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
+#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
+#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
+#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
+#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
+#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
+#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
+#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
+#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
+#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
+#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
+#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
+#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
+#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
+#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
+#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
+#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
+#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
+#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
+#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
+#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
+#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
+#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
+#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
+#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
+#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
+#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
+#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
+#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
+#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
+#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
+#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
+#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
+#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
+#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
+#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
+#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
+#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
+#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
+#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
+#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
+#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
+#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
+#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
+#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
+#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
+#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
+#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
+#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
+#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
+#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
+#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
+#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
+#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
+#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
+#define MC_CMD_GMAC_DMABUF_START 0x40 /* enum */
+#define MC_CMD_GMAC_DMABUF_END 0x5f /* enum */
+#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
+#define MC_CMD_MAC_NSTATS 0x61 /* enum */
+
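MC_CMD_MAC_STATS_OUT_NO_DMA_LEN above works out to one 64-bit counter per statistic: (0x61 * 64) >> 3 = 776 bytes, indexed by the enum values. A sketch of pulling one counter out of such a response, assuming (as the LO/HI offsets suggest) little-endian 64-bit counters in a buffer already copied out of the MCDI response:

#include <stddef.h>
#include <stdint.h>

/* Illustrative only: read one 64-bit MAC statistic from a NO_DMA response
 * buffer laid out as MC_CMD_MAC_NSTATS little-endian 64-bit counters. */
static uint64_t mac_stat(const uint8_t *buf, unsigned int index)
{
        size_t ofst = MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST +
                      (size_t)index * MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN;
        uint64_t v = 0;
        unsigned int i;

        for (i = 0; i < 8; i++)
                v |= (uint64_t)buf[ofst + i] << (8 * i);
        return v;
}

For example mac_stat(buf, MC_CMD_MAC_RX_PKTS) returns the received-packet counter.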
+
+/***********************************/
+/* MC_CMD_SRIOV
+ * to be documented
+ */
+#define MC_CMD_SRIOV 0x30
+
+/* MC_CMD_SRIOV_IN msgrequest */
+#define MC_CMD_SRIOV_IN_LEN 12
+#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+
+/* MC_CMD_SRIOV_OUT msgresponse */
+#define MC_CMD_SRIOV_OUT_LEN 8
+#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+
+/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MEMCPY
+ * Perform memory copy operation.
+ */
+#define MC_CMD_MEMCPY 0x31
+
+/* MC_CMD_MEMCPY_IN msgrequest */
+#define MC_CMD_MEMCPY_IN_LENMIN 32
+#define MC_CMD_MEMCPY_IN_LENMAX 224
+#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
+#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
+#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
+#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
+#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
+
+/* MC_CMD_MEMCPY_OUT msgresponse */
+#define MC_CMD_MEMCPY_OUT_LEN 0
+
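In the structuredef above every field carries both a byte offset (_OFST) and a bit offset (_LBN), related by LBN = OFST * 8, which is how the same 32-byte record layout is described whether fields are addressed in bytes or in bits. MC_CMD_MEMCPY then takes between 1 and 7 such records back to back. An illustrative calculation (not driver code), assuming only the definitions above:

#include <stddef.h>

/* Illustrative only: number of 32-byte memcpy records in a request of
 * total payload length len, or 0 if len is not a valid MC_CMD_MEMCPY_IN
 * length. */
static unsigned int memcpy_record_count(size_t len)
{
        if (len < MC_CMD_MEMCPY_IN_LENMIN ||
            len > MC_CMD_MEMCPY_IN_LENMAX ||
            len % MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN != 0)
                return 0;
        return len / MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN;
}

For example MC_CMD_MEMCPY_IN_LEN(3) is 96, so memcpy_record_count(96) returns 3.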
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_SET
+ * Set a WoL filter.
*/
#define MC_CMD_WOL_FILTER_SET 0x32
-#define MC_CMD_WOL_FILTER_SET_IN_LEN 192 /* 190 rounded up to a word */
-#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
-#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
-
-/* There is a union at offset 8, following defines overlap due to
- * this */
-#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
-
-#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST \
- MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
-
-#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST \
- MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
-#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST \
- (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 4)
-#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST \
- (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 8)
-#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST \
- (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 10)
-
-#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST \
- MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
-#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST \
- (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 16)
-#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST \
- (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 32)
-#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST \
- (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 34)
-
-#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST \
- MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
-#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_OFST \
- (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 48)
-#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST \
- (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 176)
-#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST \
- (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 177)
-#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST \
- (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 178)
-
-#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST \
- MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
-#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
-#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
-#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
-#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
-
-#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
-#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
-
-/* WOL Filter types enumeration */
-#define MC_CMD_WOL_TYPE_MAGIC 0x0
- /* unused 0x1 */
-#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
-#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
-#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
-#define MC_CMD_WOL_TYPE_BITMAP 0x5
-#define MC_CMD_WOL_TYPE_LINK 0x6
-#define MC_CMD_WOL_TYPE_MAX 0x7
-
-#define MC_CMD_FILTER_MODE_SIMPLE 0x0
-#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff
-
-/* MC_CMD_WOL_FILTER_REMOVE:
- * Remove a WoL filter
- *
- * Locks required: None
- * Returns: 0, EINVAL, ENOSYS
+
+/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
+#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+#define MC_CMD_WOL_TYPE_MAGIC 0x0 /* enum */
+#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 /* enum */
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 /* enum */
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 /* enum */
+#define MC_CMD_WOL_TYPE_BITMAP 0x5 /* enum */
+#define MC_CMD_WOL_TYPE_LINK 0x6 /* enum */
+#define MC_CMD_WOL_TYPE_MAX 0x7 /* enum */
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
+
+/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1
+
+/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
+
+/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+
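MC_CMD_WOL_FILTER_SET replaces the old overlapping-union layout with one msgrequest variant per WOL type, each with its own length; the FILTER_MODE and WOL_TYPE dwords stay at offsets 0 and 4 in every variant. A sketch of choosing the payload length for a given type (illustrative only; types without a dedicated variant above fall back to the full generic length):

#include <stddef.h>

/* Illustrative only: payload length for one MC_CMD_WOL_FILTER_SET variant. */
static size_t wol_filter_set_len(unsigned int wol_type)
{
        switch (wol_type) {
        case MC_CMD_WOL_TYPE_MAGIC:
                return MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN;
        case MC_CMD_WOL_TYPE_IPV4_SYN:
                return MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN;
        case MC_CMD_WOL_TYPE_IPV6_SYN:
                return MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN;
        case MC_CMD_WOL_TYPE_BITMAP:
                return MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN;
        case MC_CMD_WOL_TYPE_LINK:
                return MC_CMD_WOL_FILTER_SET_IN_LINK_LEN;
        default:
                return MC_CMD_WOL_FILTER_SET_IN_LEN; /* generic layout */
        }
}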
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_REMOVE
+ * Remove a WoL filter.
*/
#define MC_CMD_WOL_FILTER_REMOVE 0x33
-#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
-#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
-#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
+/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
+#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
-/* MC_CMD_WOL_FILTER_RESET:
- * Reset (i.e. remove all) WoL filters
- *
- * Locks required: None
- * Returns: 0, ENOSYS
+/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_RESET
+ * Reset (i.e. remove all) WoL filters.
*/
#define MC_CMD_WOL_FILTER_RESET 0x34
-#define MC_CMD_WOL_FILTER_RESET_IN_LEN 0
-#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
-/* MC_CMD_SET_MCAST_HASH:
- * Set the MCASH hash value without otherwise
- * reconfiguring the MAC
+/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
+#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
+
+/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MCAST_HASH
+ * Set the MCAST hash value.
*/
#define MC_CMD_SET_MCAST_HASH 0x35
-#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
-#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
-#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
-#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
-/* MC_CMD_NVRAM_TYPES:
- * Return bitfield indicating available types of virtual NVRAM partitions
- *
- * Locks required: none
- * Returns: 0
+/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
+#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
+
+/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
+#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TYPES
+ * Get information about virtual NVRAM partitions.
*/
#define MC_CMD_NVRAM_TYPES 0x36
-#define MC_CMD_NVRAM_TYPES_IN_LEN 0
-#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
-#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
-
-/* Supported NVRAM types */
-#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0
-#define MC_CMD_NVRAM_TYPE_MC_FW 1
-#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 2
-#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 3
-#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 4
-#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 5
-#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 6
-#define MC_CMD_NVRAM_TYPE_EXP_ROM 7
-#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 8
-#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 9
-#define MC_CMD_NVRAM_TYPE_PHY_PORT0 10
-#define MC_CMD_NVRAM_TYPE_PHY_PORT1 11
-#define MC_CMD_NVRAM_TYPE_LOG 12
-
-/* MC_CMD_NVRAM_INFO:
- * Read info about a virtual NVRAM partition
- *
- * Locks required: none
- * Returns: 0, EINVAL (bad type)
+
+/* MC_CMD_NVRAM_TYPES_IN msgrequest */
+#define MC_CMD_NVRAM_TYPES_IN_LEN 0
+
+/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
+#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 /* enum */
+#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 /* enum */
+#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 /* enum */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 /* enum */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 /* enum */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 /* enum */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 /* enum */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 /* enum */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 /* enum */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 /* enum */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa /* enum */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb /* enum */
+#define MC_CMD_NVRAM_TYPE_LOG 0xc /* enum */
+#define MC_CMD_NVRAM_TYPE_FPGA 0xd /* enum */
+
+
+/***********************************/
+/* MC_CMD_NVRAM_INFO
+ * Read info about a virtual NVRAM partition.
*/
#define MC_CMD_NVRAM_INFO 0x37
-#define MC_CMD_NVRAM_INFO_IN_LEN 4
-#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
-#define MC_CMD_NVRAM_INFO_OUT_LEN 24
-#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
-#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
-#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
-#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
-#define MC_CMD_NVRAM_PROTECTED_LBN 0
-#define MC_CMD_NVRAM_PROTECTED_WIDTH 1
-#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
-#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
-
-/* MC_CMD_NVRAM_UPDATE_START:
- * Start a group of update operations on a virtual NVRAM partition
- *
- * Locks required: PHY_LOCK if type==*PHY*
- * Returns: 0, EINVAL (bad type), EACCES (if PHY_LOCK required and not held)
+
+/* MC_CMD_NVRAM_INFO_IN msgrequest */
+#define MC_CMD_NVRAM_INFO_IN_LEN 4
+#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_INFO_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_OUT_LEN 24
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_START
+ * Start a group of update operations on a virtual NVRAM partition.
*/
#define MC_CMD_NVRAM_UPDATE_START 0x38
-#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
-#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
-#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
-/* MC_CMD_NVRAM_READ:
- * Read data from a virtual NVRAM partition
- *
- * Locks required: PHY_LOCK if type==*PHY*
- * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
+/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest */
+#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_READ
+ * Read data from a virtual NVRAM partition.
*/
#define MC_CMD_NVRAM_READ 0x39
-#define MC_CMD_NVRAM_READ_IN_LEN 12
-#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
-#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
-#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
-#define MC_CMD_NVRAM_READ_OUT_LEN(_read_bytes) (_read_bytes)
-#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
-
-/* MC_CMD_NVRAM_WRITE:
- * Write data to a virtual NVRAM partition
- *
- * Locks required: PHY_LOCK if type==*PHY*
- * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
+
+/* MC_CMD_NVRAM_READ_IN msgrequest */
+#define MC_CMD_NVRAM_READ_IN_LEN 12
+#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+
+/* MC_CMD_NVRAM_READ_OUT msgresponse */
+#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
+#define MC_CMD_NVRAM_READ_OUT_LENMAX 255
+#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 255
+
+
+/***********************************/
+/* MC_CMD_NVRAM_WRITE
+ * Write data to a virtual NVRAM partition.
*/
#define MC_CMD_NVRAM_WRITE 0x3a
-#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
-#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
-#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
-#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
-#define MC_CMD_NVRAM_WRITE_IN_LEN(_write_bytes) (12 + _write_bytes)
-#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
-
-/* MC_CMD_NVRAM_ERASE:
- * Erase sector(s) from a virtual NVRAM partition
- *
- * Locks required: PHY_LOCK if type==*PHY*
- * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
+
+/* MC_CMD_NVRAM_WRITE_IN msgrequest */
+#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
+#define MC_CMD_NVRAM_WRITE_IN_LENMAX 255
+#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 243
+
+/* MC_CMD_NVRAM_WRITE_OUT msgresponse */
+#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_ERASE
+ * Erase sector(s) from a virtual NVRAM partition.
*/
#define MC_CMD_NVRAM_ERASE 0x3b
-#define MC_CMD_NVRAM_ERASE_IN_LEN 12
-#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
-#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
-#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
-#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
-
-/* MC_CMD_NVRAM_UPDATE_FINISH:
- * Finish a group of update operations on a virtual NVRAM partition
- *
- * Locks required: PHY_LOCK if type==*PHY*
- * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
+
+/* MC_CMD_NVRAM_ERASE_IN msgrequest */
+#define MC_CMD_NVRAM_ERASE_IN_LEN 12
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+
+/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
+#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_FINISH
+ * Finish a group of update operations on a virtual NVRAM partition.
*/
#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
-#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
-#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
-#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
-#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
-/* MC_CMD_REBOOT:
+/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_REBOOT
* Reboot the MC.
- *
- * The AFTER_ASSERTION flag is intended to be used when the driver notices
- * an assertion failure (at which point it is expected to perform a complete
- * tear down and reinitialise), to allow both ports to reset the MC once
- * in an atomic fashion.
- *
- * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
- * which means that they will automatically reboot out of the assertion
- * handler, so this is in practise an optional operation. It is still
- * recommended that drivers execute this to support custom firmwares
- * with REBOOT_ON_ASSERT=0.
- *
- * Locks required: NONE
- * Returns: Nothing. You get back a response with ERR=1, DATALEN=0
*/
#define MC_CMD_REBOOT 0x3d
-#define MC_CMD_REBOOT_IN_LEN 4
-#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
-#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 1
-#define MC_CMD_REBOOT_OUT_LEN 0
-/* MC_CMD_SCHEDINFO:
- * Request scheduler info. from the MC.
- *
- * Locks required: NONE
- * Returns: An array of (timeslice,maximum overrun), one for each thread,
- * in ascending order of thread address.s
+/* MC_CMD_REBOOT_IN msgrequest */
+#define MC_CMD_REBOOT_IN_LEN 4
+#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
+
+/* MC_CMD_REBOOT_OUT msgresponse */
+#define MC_CMD_REBOOT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SCHEDINFO
+ * Request scheduler info.
*/
#define MC_CMD_SCHEDINFO 0x3e
-#define MC_CMD_SCHEDINFO_IN_LEN 0
+/* MC_CMD_SCHEDINFO_IN msgrequest */
+#define MC_CMD_SCHEDINFO_IN_LEN 0
-/* MC_CMD_SET_REBOOT_MODE: (debug)
- * Set the mode for the next MC reboot.
- *
- * Locks required: NONE
- *
- * Sets the reboot mode to the specified value. Returns the old mode.
+/* MC_CMD_SCHEDINFO_OUT msgresponse */
+#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
+#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
+#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
+#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
+#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
+#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_REBOOT_MODE
*/
#define MC_CMD_REBOOT_MODE 0x3f
-#define MC_CMD_REBOOT_MODE_IN_LEN 4
-#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_OUT_LEN 4
-#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_NORMAL 0
-#define MC_CMD_REBOOT_MODE_SNAPPER 3
-
-/* MC_CMD_DEBUG_LOG:
- * Null request/response command (debug)
- * - sequence number is always zero
- * - only supported on the UART interface
- * (the same set of bytes is delivered as an
- * event over PCI)
- */
-#define MC_CMD_DEBUG_LOG 0x40
-#define MC_CMD_DEBUG_LOG_IN_LEN 0
-#define MC_CMD_DEBUG_LOG_OUT_LEN 0
-
-/* Generic sensor enumeration. Note that a dual port NIC
- * will EITHER expose PHY_COMMON_TEMP OR PHY0_TEMP and
- * PHY1_TEMP depending on whether there is a single sensor
- * in the vicinity of the two port, or one per port.
- */
-#define MC_CMD_SENSOR_CONTROLLER_TEMP 0 /* degC */
-#define MC_CMD_SENSOR_PHY_COMMON_TEMP 1 /* degC */
-#define MC_CMD_SENSOR_CONTROLLER_COOLING 2 /* bool */
-#define MC_CMD_SENSOR_PHY0_TEMP 3 /* degC */
-#define MC_CMD_SENSOR_PHY0_COOLING 4 /* bool */
-#define MC_CMD_SENSOR_PHY1_TEMP 5 /* degC */
-#define MC_CMD_SENSOR_PHY1_COOLING 6 /* bool */
-#define MC_CMD_SENSOR_IN_1V0 7 /* mV */
-#define MC_CMD_SENSOR_IN_1V2 8 /* mV */
-#define MC_CMD_SENSOR_IN_1V8 9 /* mV */
-#define MC_CMD_SENSOR_IN_2V5 10 /* mV */
-#define MC_CMD_SENSOR_IN_3V3 11 /* mV */
-#define MC_CMD_SENSOR_IN_12V0 12 /* mV */
-
-
-/* Sensor state */
-#define MC_CMD_SENSOR_STATE_OK 0
-#define MC_CMD_SENSOR_STATE_WARNING 1
-#define MC_CMD_SENSOR_STATE_FATAL 2
-#define MC_CMD_SENSOR_STATE_BROKEN 3
-
-/* MC_CMD_SENSOR_INFO:
+
+/* MC_CMD_REBOOT_MODE_IN msgrequest */
+#define MC_CMD_REBOOT_MODE_IN_LEN 4
+#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_NORMAL 0x0 /* enum */
+#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 /* enum */
+
+/* MC_CMD_REBOOT_MODE_OUT msgresponse */
+#define MC_CMD_REBOOT_MODE_OUT_LEN 4
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SENSOR_INFO
* Returns information about every available sensor.
- *
- * Each sensor has a single (16bit) value, and a corresponding state.
- * The mapping between value and sensor is nominally determined by the
- * MC, but in practise is implemented as zero (BROKEN), one (TEMPERATURE),
- * or two (VOLTAGE) ranges per sensor per state.
- *
- * This call returns a mask (32bit) of the sensors that are supported
- * by this platform, then an array (indexed by MC_CMD_SENSOR) of byte
- * offsets to the per-sensor arrays. Each sensor array has four 16bit
- * numbers, min1, max1, min2, max2.
- *
- * Locks required: None
- * Returns: 0
*/
#define MC_CMD_SENSOR_INFO 0x41
-#define MC_CMD_SENSOR_INFO_IN_LEN 0
-#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
-#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
- (4 + (_x))
-#define MC_CMD_SENSOR_INFO_OUT_MIN1_OFST(_ofst) \
- ((_ofst) + 0)
-#define MC_CMD_SENSOR_INFO_OUT_MAX1_OFST(_ofst) \
- ((_ofst) + 2)
-#define MC_CMD_SENSOR_INFO_OUT_MIN2_OFST(_ofst) \
- ((_ofst) + 4)
-#define MC_CMD_SENSOR_INFO_OUT_MAX2_OFST(_ofst) \
- ((_ofst) + 6)
+/* MC_CMD_SENSOR_INFO_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_IN_LEN 0
+
+/* MC_CMD_SENSOR_INFO_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12
+#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 /* enum */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 /* enum */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 /* enum */
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3 /* enum */
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4 /* enum */
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5 /* enum */
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6 /* enum */
+#define MC_CMD_SENSOR_IN_1V0 0x7 /* enum */
+#define MC_CMD_SENSOR_IN_1V2 0x8 /* enum */
+#define MC_CMD_SENSOR_IN_1V8 0x9 /* enum */
+#define MC_CMD_SENSOR_IN_2V5 0xa /* enum */
+#define MC_CMD_SENSOR_IN_3V3 0xb /* enum */
+#define MC_CMD_SENSOR_IN_12V0 0xc /* enum */
+#define MC_CMD_SENSOR_IN_1V2A 0xd /* enum */
+#define MC_CMD_SENSOR_IN_VREF 0xe /* enum */
+#define MC_CMD_SENSOR_ENTRY_OFST 4
+#define MC_CMD_SENSOR_ENTRY_LEN 8
+#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
+#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
+#define MC_CMD_SENSOR_ENTRY_MINNUM 1
+#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
+
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16
+
+
+/***********************************/
/* MC_CMD_READ_SENSORS
- * Returns the current reading from each sensor
- *
- * Returns a sparse array of sensor readings (indexed by the sensor
- * type) into host memory. Each array element is a dword.
- *
- * The MC will send a SENSOREVT event every time any sensor changes state. The
- * driver is responsible for ensuring that it doesn't miss any events. The board
- * will function normally if all sensors are in STATE_OK or state_WARNING.
- * Otherwise the board should not be expected to function.
+ * Returns the current reading from each sensor.
*/
#define MC_CMD_READ_SENSORS 0x42
-#define MC_CMD_READ_SENSORS_IN_LEN 8
-#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
-#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
-#define MC_CMD_READ_SENSORS_OUT_LEN 0
-/* Sensor reading fields */
-#define MC_CMD_READ_SENSOR_VALUE_LBN 0
-#define MC_CMD_READ_SENSOR_VALUE_WIDTH 16
-#define MC_CMD_READ_SENSOR_STATE_LBN 16
-#define MC_CMD_READ_SENSOR_STATE_WIDTH 8
-
-
-/* MC_CMD_GET_PHY_STATE:
- * Report current state of PHY. A "zombie" PHY is a PHY that has failed to
- * boot (e.g. due to missing or corrupted firmware).
- *
- * Locks required: None
- * Return code: 0
+/* MC_CMD_READ_SENSORS_IN msgrequest */
+#define MC_CMD_READ_SENSORS_IN_LEN 8
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_READ_SENSORS_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_OUT_LEN 0
+
+/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
+#define MC_CMD_SENSOR_STATE_OK 0x0 /* enum */
+#define MC_CMD_SENSOR_STATE_WARNING 0x1 /* enum */
+#define MC_CMD_SENSOR_STATE_FATAL 0x2 /* enum */
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_STATE
+ * Report current state of PHY.
*/
#define MC_CMD_GET_PHY_STATE 0x43
-#define MC_CMD_GET_PHY_STATE_IN_LEN 0
-#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
-#define MC_CMD_GET_PHY_STATE_STATE_OFST 0
-/* PHY state enumeration: */
-#define MC_CMD_PHY_STATE_OK 1
-#define MC_CMD_PHY_STATE_ZOMBIE 2
+/* MC_CMD_GET_PHY_STATE_IN msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
+#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define MC_CMD_PHY_STATE_OK 0x1 /* enum */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2 /* enum */
-/* 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
- * disable 802.Qbb for a given priority. */
+
+/***********************************/
+/* MC_CMD_SETUP_8021QBB
+ * 802.1Qbb control.
+ */
#define MC_CMD_SETUP_8021QBB 0x44
-#define MC_CMD_SETUP_8021QBB_IN_LEN 32
-#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
-#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFFST 0
+/* MC_CMD_SETUP_8021QBB_IN msgrequest */
+#define MC_CMD_SETUP_8021QBB_IN_LEN 32
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
-/* MC_CMD_WOL_FILTER_GET:
- * Retrieve ID of any WoL filters
- *
- * Locks required: None
- * Returns: 0, ENOSYS
- */
-#define MC_CMD_WOL_FILTER_GET 0x45
-#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
-#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
-#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
+#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD:
- * Offload a protocol to NIC for lights-out state
- *
- * Locks required: None
- * Returns: 0, ENOSYS
+/***********************************/
+/* MC_CMD_WOL_FILTER_GET
+ * Retrieve ID of any WoL filters.
*/
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+#define MC_CMD_WOL_FILTER_GET 0x45
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN 16
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
-/* There is a union at offset 4, following defines overlap due to
- * this */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPMAC_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPIP_OFST 10
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSMAC_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSSNIPV6_OFST 10
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSIPV6_OFST 26
+/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+/***********************************/
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
+ * Add a protocol offload to NIC for lights-out state.
+ */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
-/* MC_CMD_REMOVE_LIGHTSOUT_PROTOCOL_OFFLOAD:
- * Offload a protocol to NIC for lights-out state
- *
- * Locks required: None
- * Returns: 0, ENOSYS
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
+ * Remove a protocol offload from NIC for lights-out state.
*/
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
-/* Lights-out offload protocols enumeration */
-#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1
-#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
-/* MC_CMD_MAC_RESET_RESTORE:
- * Restore MAC after block reset
- *
- * Locks required: None
- * Returns: 0
+/***********************************/
+/* MC_CMD_MAC_RESET_RESTORE
+ * Restore MAC after block reset.
*/
-
#define MC_CMD_MAC_RESET_RESTORE 0x48
-#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
-#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
+#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
+
+/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
+#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
-/* MC_CMD_TEST_ASSERT:
- * Deliberately trigger an assert-detonation in the firmware for testing
- * purposes (i.e. to allow tests that the driver copes gracefully).
- *
- * Locks required: None
- * Returns: 0
- */
+/***********************************/
+/* MC_CMD_TESTASSERT
+ */
#define MC_CMD_TESTASSERT 0x49
-#define MC_CMD_TESTASSERT_IN_LEN 0
-#define MC_CMD_TESTASSERT_OUT_LEN 0
-/* MC_CMD_WORKAROUND 0x4a
- *
- * Enable/Disable a given workaround. The mcfw will return EINVAL if it
- * doesn't understand the given workaround number - which should not
- * be treated as a hard error by client code.
- *
- * This op does not imply any semantics about each workaround, that's between
- * the driver and the mcfw on a per-workaround basis.
- *
- * Locks required: None
- * Returns: 0, EINVAL
+/* MC_CMD_TESTASSERT_IN msgrequest */
+#define MC_CMD_TESTASSERT_IN_LEN 0
+
+/* MC_CMD_TESTASSERT_OUT msgresponse */
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WORKAROUND
+ * Enable/Disable a given workaround.
*/
#define MC_CMD_WORKAROUND 0x4a
-#define MC_CMD_WORKAROUND_IN_LEN 8
-#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
-#define MC_CMD_WORKAROUND_BUG17230 1
-#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
-#define MC_CMD_WORKAROUND_OUT_LEN 0
-
-/* MC_CMD_GET_PHY_MEDIA_INFO:
- * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
- * SFP+ PHYs).
- *
- * The "media type" can be found via GET_PHY_CFG (GET_PHY_CFG_OUT_MEDIA_TYPE);
- * the valid "page number" input values, and the output data, are interpreted
- * on a per-type basis.
- *
- * For SFP+: PAGE=0 or 1 returns a 128-byte block read from module I2C address
- * 0xA0 offset 0 or 0x80.
- * Anything else: currently undefined.
- *
- * Locks required: None
- * Return code: 0
+
+/* MC_CMD_WORKAROUND_IN msgrequest */
+#define MC_CMD_WORKAROUND_IN_LEN 8
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_BUG17230 0x1 /* enum */
+#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+
+/* MC_CMD_WORKAROUND_OUT msgresponse */
+#define MC_CMD_WORKAROUND_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_MEDIA_INFO
+ * Read media-specific data from PHY.
*/
#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
-#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
-#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
-#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(_num_bytes) (4 + (_num_bytes))
-#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
-#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
-
-/* MC_CMD_NVRAM_TEST:
- * Test a particular NVRAM partition for valid contents (where "valid"
- * depends on the type of partition).
- *
- * Locks required: None
- * Return code: 0
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 255
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 251
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TEST
+ * Test a particular NVRAM partition.
*/
#define MC_CMD_NVRAM_TEST 0x4c
-#define MC_CMD_NVRAM_TEST_IN_LEN 4
-#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
-#define MC_CMD_NVRAM_TEST_OUT_LEN 4
-#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
-#define MC_CMD_NVRAM_TEST_PASS 0
-#define MC_CMD_NVRAM_TEST_FAIL 1
-#define MC_CMD_NVRAM_TEST_NOTSUPP 2
-
-/* MC_CMD_MRSFP_TWEAK: (debug)
- * Read status and/or set parameters for the "mrsfp" driver in mr_rusty builds.
- * I2C I/O expander bits are always read; if equaliser parameters are supplied,
- * they are configured first.
- *
- * Locks required: None
- * Return code: 0, EINVAL
+
+/* MC_CMD_NVRAM_TEST_IN msgrequest */
+#define MC_CMD_NVRAM_TEST_IN_LEN 4
+#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_TEST_OUT msgresponse */
+#define MC_CMD_NVRAM_TEST_OUT_LEN 4
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define MC_CMD_NVRAM_TEST_PASS 0x0 /* enum */
+#define MC_CMD_NVRAM_TEST_FAIL 0x1 /* enum */
+#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 /* enum */
+
+
+/***********************************/
+/* MC_CMD_MRSFP_TWEAK
+ * Read status and/or set parameters for the 'mrsfp' driver.
*/
#define MC_CMD_MRSFP_TWEAK 0x4d
-#define MC_CMD_MRSFP_TWEAK_IN_LEN_READ_ONLY 0
-#define MC_CMD_MRSFP_TWEAK_IN_LEN_EQ_CONFIG 16
-#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_LEVEL_OFST 0 /* 0-6 low->high de-emph. */
-#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_DT_CFG_OFST 4 /* 0-8 low->high ref.V */
-#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_BOOST_OFST 8 /* 0-8 low->high boost */
-#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_DT_CFG_OFST 12 /* 0-8 low->high ref.V */
-#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 /* input bits */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */
-
-/* MC_CMD_TEST_HACK: (debug (unsurprisingly))
- * Change bits of network port state for test purposes in ways that would never be
- * useful in normal operation and so need a special command to change. */
-#define MC_CMD_TEST_HACK 0x2f
-#define MC_CMD_TEST_HACK_IN_LEN 8
-#define MC_CMD_TEST_HACK_IN_TXPAD_OFST 0
-#define MC_CMD_TEST_HACK_IN_TXPAD_AUTO 0 /* Let the MC manage things */
-#define MC_CMD_TEST_HACK_IN_TXPAD_ON 1 /* Force on */
-#define MC_CMD_TEST_HACK_IN_TXPAD_OFF 2 /* Force on */
-#define MC_CMD_TEST_HACK_IN_IPG_OFST 4 /* Takes a value in bits */
-#define MC_CMD_TEST_HACK_IN_IPG_AUTO 0 /* The MC picks the value */
-#define MC_CMD_TEST_HACK_OUT_LEN 0
-
-/* MC_CMD_SENSOR_SET_LIMS: (debug) (mostly) adjust the sensor limits. This
- * is a warranty-voiding operation.
- *
- * IN: sensor identifier (one of the enumeration starting with MC_CMD_SENSOR_CONTROLLER_TEMP
- * followed by 4 32-bit values: min(warning) max(warning), min(fatal), max(fatal). Which
- * of these limits are meaningful and what their interpretation is is sensor-specific.
- *
- * OUT: nothing
- *
- * Returns: ENOENT if the sensor specified does not exist, EINVAL if the limits are
- * out of range.
+
+/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+
+/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
+
+/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 /* enum */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1 /* enum */
+
+
+/***********************************/
+/* MC_CMD_SENSOR_SET_LIMS
+ * Adjusts the sensor limits.
*/
#define MC_CMD_SENSOR_SET_LIMS 0x4e
-#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
-#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
-
-/* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be
- * used for post-3.0 extensions. If you run out of space, look for gaps or
- * commands that are unused in the existing range. */
+
+/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+
+/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
+#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RESOURCE_LIMITS
+ */
+#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
+
+/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
+#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
+
+/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+
+/* MC_CMD_RESOURCE_SPECIFIER enum */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff /* enum */
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
+
#endif /* MCDI_PCOL_H */
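For readers following the new variable-length message macros introduced above (the *_LENMIN/*_LENMAX/*_LEN(num) triples), the standalone sketch below shows how the MC_CMD_NVRAM_WRITE request length relates to the payload size. The macro values are copied from the hunk above; the helper name nvram_write_req_len and its clamping behaviour are illustrative assumptions, not the driver's actual MCDI request path.

/* Illustrative only: the three macros mirror the MC_CMD_NVRAM_WRITE_IN_*
 * values added in the hunk above; the sizing helper is a sketch, not the
 * driver's real MCDI code.
 */
#include <stddef.h>
#include <stdio.h>

#define MC_CMD_NVRAM_WRITE_IN_LENMAX              255
#define MC_CMD_NVRAM_WRITE_IN_LEN(num)            (12 + 1 * (num))
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 243

/* Return the request length needed to carry 'bytes' of payload,
 * or 0 if the payload exceeds what a single request can hold.
 */
static size_t nvram_write_req_len(size_t bytes)
{
	if (bytes == 0 || bytes > MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM)
		return 0;
	return MC_CMD_NVRAM_WRITE_IN_LEN(bytes);	/* never exceeds LENMAX */
}

int main(void)
{
	printf("64-byte write  -> %zu-byte request\n", nvram_write_req_len(64));
	printf("243-byte write -> %zu-byte request (LENMAX=%d)\n",
	       nvram_write_req_len(MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM),
	       MC_CMD_NVRAM_WRITE_IN_LENMAX);
	return 0;
}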
diff --git a/drivers/net/ethernet/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_phy.c
index 6c63ab0710af..7bcad899a936 100644
--- a/drivers/net/ethernet/sfc/mcdi_phy.c
+++ b/drivers/net/ethernet/sfc/mcdi_phy.c
@@ -116,7 +116,7 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
goto fail;
}
- *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_SUGGESTED);
+ *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);
return 0;
@@ -264,22 +264,22 @@ static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
/* TODO: Advertise the capabilities supported by this PHY */
supported = 0;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_TXDIS_LBN))
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN))
supported |= PHY_MODE_TX_DISABLED;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_LOWPOWER_LBN))
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN))
supported |= PHY_MODE_LOW_POWER;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_POWEROFF_LBN))
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN))
supported |= PHY_MODE_OFF;
mode = efx->phy_mode & supported;
flags = 0;
if (mode & PHY_MODE_TX_DISABLED)
- flags |= (1 << MC_CMD_SET_LINK_TXDIS_LBN);
+ flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN);
if (mode & PHY_MODE_LOW_POWER)
- flags |= (1 << MC_CMD_SET_LINK_LOWPOWER_LBN);
+ flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN);
if (mode & PHY_MODE_OFF)
- flags |= (1 << MC_CMD_SET_LINK_POWEROFF_LBN);
+ flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN);
return flags;
}
@@ -436,8 +436,8 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx,
break;
}
- link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_LINK_UP_LBN));
- link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_FULL_DUPLEX_LBN));
+ link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+ link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
link_state->speed = speed;
}
@@ -592,7 +592,7 @@ static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
return -EIO;
- if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK)
+ if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK)
return -EINVAL;
return 0;
@@ -680,7 +680,7 @@ static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
u32 mode;
int rc;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
if (rc < 0)
return rc;
@@ -691,15 +691,15 @@ static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
/* If we support both LONG and SHORT, then run each in response to
* break or not. Otherwise, run the one we support */
mode = 0;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN)) {
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN)) {
if ((flags & ETH_TEST_FL_OFFLINE) &&
(phy_cfg->flags &
- (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN)))
+ (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN)))
mode = MC_CMD_PHY_BIST_CABLE_LONG;
else
mode = MC_CMD_PHY_BIST_CABLE_SHORT;
} else if (phy_cfg->flags &
- (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))
+ (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))
mode = MC_CMD_PHY_BIST_CABLE_LONG;
if (mode != 0) {
@@ -717,14 +717,14 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
if (index == 0)
return "bist";
--index;
}
- if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN) |
- (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) {
+ if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN) |
+ (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) {
if (index == 0)
return "cable";
--index;
@@ -741,7 +741,7 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
const struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
- .init = efx_port_dummy_op_int,
+ .init = efx_port_dummy_op_int,
.reconfigure = efx_mcdi_phy_reconfigure,
.poll = efx_mcdi_phy_poll,
.fini = efx_port_dummy_op_void,
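The renamed flag and field constants used in the mcdi_phy.c hunk above follow the header's LBN/WIDTH convention: LBN is the least-significant bit number of a field within a 32-bit word and WIDTH is its size in bits. The small standalone sketch below extracts the VALUE and STATE fields of a sensor reading using constants copied from the MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF definitions above; the helper mcdi_field is an assumption for illustration, since the real driver uses its own MCDI/EFX field macros, which are not part of this patch.

/* Illustrative only: demonstrates the LBN/WIDTH bitfield convention with
 * constants mirrored from the MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF hunk above.
 */
#include <stdint.h>
#include <stdio.h>

#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN   0
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN   16
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8

/* Extract a WIDTH-bit field starting at bit LBN from a 32-bit dword. */
static uint32_t mcdi_field(uint32_t dword, unsigned int lbn, unsigned int width)
{
	return (dword >> lbn) & ((1u << width) - 1);
}

int main(void)
{
	uint32_t entry = 0x00020123;	/* example reading: value=0x0123, state=2 */

	printf("value=0x%04x state=%u\n",
	       mcdi_field(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN,
			  MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH),
	       mcdi_field(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN,
			  MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH));
	return 0;
}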
diff --git a/drivers/net/ethernet/sfc/mdio_10g.c b/drivers/net/ethernet/sfc/mdio_10g.c
index 7ab385c8136d..9acfd6696ffb 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.c
+++ b/drivers/net/ethernet/sfc/mdio_10g.c
@@ -228,7 +228,7 @@ void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
/**
* efx_mdio_set_settings - Set (some of) the PHY settings over MDIO.
* @efx: Efx NIC
- * @ecmd: New settings
+ * @ecmd: New settings
*/
int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index bc9dcd6b30d7..26b3c23b0b6f 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -280,7 +280,7 @@ fail:
--part;
efx_mtd_remove_partition(part);
}
- /* mtd_device_register() returns 1 if the MTD table is full */
+ /* Failure is unlikely here, but probably means we're out of memory */
return -ENOMEM;
}
@@ -382,7 +382,7 @@ static int falcon_mtd_sync(struct mtd_info *mtd)
return rc;
}
-static struct efx_mtd_ops falcon_mtd_ops = {
+static const struct efx_mtd_ops falcon_mtd_ops = {
.read = falcon_mtd_read,
.erase = falcon_mtd_erase,
.write = falcon_mtd_write,
@@ -560,7 +560,7 @@ static int siena_mtd_sync(struct mtd_info *mtd)
return rc;
}
-static struct efx_mtd_ops siena_mtd_ops = {
+static const struct efx_mtd_ops siena_mtd_ops = {
.read = siena_mtd_read,
.erase = siena_mtd_erase,
.write = siena_mtd_write,
@@ -572,7 +572,7 @@ struct siena_nvram_type_info {
const char *name;
};
-static struct siena_nvram_type_info siena_nvram_types[] = {
+static const struct siena_nvram_type_info siena_nvram_types[] = {
[MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
[MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
[MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
@@ -593,7 +593,7 @@ static int siena_mtd_probe_partition(struct efx_nic *efx,
unsigned int type)
{
struct efx_mtd_partition *part = &efx_mtd->part[part_id];
- struct siena_nvram_type_info *info;
+ const struct siena_nvram_type_info *info;
size_t size, erase_size;
bool protected;
int rc;
@@ -627,11 +627,10 @@ static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
struct efx_mtd *efx_mtd)
{
struct efx_mtd_partition *part;
- uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN /
- sizeof(uint16_t)];
+ uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM];
int rc;
- rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list);
+ rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c49502bab6a3..f0385e1fb2d8 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -13,10 +13,6 @@
#ifndef EFX_NET_DRIVER_H
#define EFX_NET_DRIVER_H
-#if defined(EFX_ENABLE_DEBUG) && !defined(DEBUG)
-#define DEBUG
-#endif
-
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -28,6 +24,7 @@
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
+#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/i2c.h>
@@ -42,7 +39,7 @@
#define EFX_DRIVER_VERSION "3.1"
-#ifdef EFX_ENABLE_DEBUG
+#ifdef DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
#else
@@ -56,8 +53,10 @@
*
**************************************************************************/
-#define EFX_MAX_CHANNELS 32
+#define EFX_MAX_CHANNELS 32U
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
+#define EFX_EXTRA_CHANNEL_IOV 0
+#define EFX_MAX_EXTRA_CHANNELS 1U
/* Checksum generation is a per-queue option in hardware, so each
* queue visible to the networking core is backed by two hardware TX
@@ -85,15 +84,8 @@ struct efx_special_buffer {
void *addr;
dma_addr_t dma_addr;
unsigned int len;
- int index;
- int entries;
-};
-
-enum efx_flush_state {
- FLUSH_NONE,
- FLUSH_PENDING,
- FLUSH_FAILED,
- FLUSH_DONE,
+ unsigned int index;
+ unsigned int entries;
};
/**
@@ -142,7 +134,6 @@ struct efx_tx_buffer {
* @txd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
* @initialised: Has hardware queue been initialised?
- * @flushed: Used when handling queue flushing
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
* @old_write_count: The value of @write_count when last checked.
@@ -185,7 +176,6 @@ struct efx_tx_queue {
struct efx_special_buffer txd;
unsigned int ptr_mask;
bool initialised;
- enum efx_flush_state flushed;
/* Members used mainly on the completion path */
unsigned int read_count ____cacheline_aligned_in_smp;
@@ -209,12 +199,12 @@ struct efx_tx_queue {
/**
* struct efx_rx_buffer - An Efx RX data buffer
* @dma_addr: DMA base address of the buffer
- * @skb: The associated socket buffer, if any.
- * If both this and page are %NULL, the buffer slot is currently free.
- * @page: The associated page buffer, if any.
- * If both this and skb are %NULL, the buffer slot is currently free.
+ * @skb: The associated socket buffer. Valid iff !(@flags & %EFX_RX_BUF_PAGE).
+ * Will be %NULL if the buffer slot is currently free.
+ * @page: The associated page buffer. Valid iff @flags & %EFX_RX_BUF_PAGE.
+ * Will be %NULL if the buffer slot is currently free.
* @len: Buffer length, in bytes.
- * @is_page: Indicates if @page is valid. If false, @skb is valid.
+ * @flags: Flags for buffer and packet state.
*/
struct efx_rx_buffer {
dma_addr_t dma_addr;
@@ -223,8 +213,11 @@ struct efx_rx_buffer {
struct page *page;
} u;
unsigned int len;
- bool is_page;
+ u16 flags;
};
+#define EFX_RX_BUF_PAGE 0x0001
+#define EFX_RX_PKT_CSUMMED 0x0002
+#define EFX_RX_PKT_DISCARD 0x0004
/**
* struct efx_rx_page_state - Page-based rx buffer state
@@ -250,6 +243,9 @@ struct efx_rx_page_state {
* @buffer: The software buffer ring
* @rxd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
+ * @enabled: Receive queue enabled indicator.
+ * @flush_pending: Set when an RX flush is pending. Has the same lifetime as
+ * @rxq_flush_pending.
* @added_count: Number of buffers added to the receive queue.
* @notified_count: Number of buffers given to NIC (<= @added_count).
* @removed_count: Number of buffers removed from the receive queue.
@@ -264,13 +260,14 @@ struct efx_rx_page_state {
* @alloc_page_count: RX allocation strategy counter.
* @alloc_skb_count: RX allocation strategy counter.
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
- * @flushed: Use when handling queue flushing
*/
struct efx_rx_queue {
struct efx_nic *efx;
struct efx_rx_buffer *buffer;
struct efx_special_buffer rxd;
unsigned int ptr_mask;
+ bool enabled;
+ bool flush_pending;
int added_count;
int notified_count;
@@ -284,8 +281,6 @@ struct efx_rx_queue {
unsigned int alloc_skb_count;
struct timer_list slow_fill;
unsigned int slow_fill_count;
-
- enum efx_flush_state flushed;
};
/**
@@ -319,6 +314,7 @@ enum efx_rx_alloc_method {
*
* @efx: Associated Efx NIC
* @channel: Channel instance number
+ * @type: Channel type definition
* @enabled: Channel enabled indicator
* @irq: IRQ number (MSI and MSI-X only)
* @irq_moderation: IRQ moderation value (in hardware ticks)
@@ -328,7 +324,7 @@ enum efx_rx_alloc_method {
* @eventq: Event queue buffer
* @eventq_mask: Event queue pointer mask
* @eventq_read_ptr: Event queue read pointer
- * @last_eventq_read_ptr: Last event queue read pointer value.
+ * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
* @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -348,6 +344,7 @@ enum efx_rx_alloc_method {
struct efx_channel {
struct efx_nic *efx;
int channel;
+ const struct efx_channel_type *type;
bool enabled;
int irq;
unsigned int irq_moderation;
@@ -357,7 +354,7 @@ struct efx_channel {
struct efx_special_buffer eventq;
unsigned int eventq_mask;
unsigned int eventq_read_ptr;
- unsigned int last_eventq_read_ptr;
+ int event_test_cpu;
unsigned int irq_count;
unsigned int irq_mod_score;
@@ -380,12 +377,31 @@ struct efx_channel {
* access with prefetches.
*/
struct efx_rx_buffer *rx_pkt;
- bool rx_pkt_csummed;
struct efx_rx_queue rx_queue;
struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
};
+/**
+ * struct efx_channel_type - distinguishes traffic and extra channels
+ * @handle_no_channel: Handle failure to allocate an extra channel
+ * @pre_probe: Set up extra state prior to initialisation
+ * @post_remove: Tear down extra state after finalisation, if allocated.
+ * May be called on channels that have not been probed.
+ * @get_name: Generate the channel's name (used for its IRQ handler)
+ * @copy: Copy the channel state prior to reallocation. May be %NULL if
+ * reallocation is not supported.
+ * @keep_eventq: Flag for whether event queue should be kept initialised
+ * while the device is stopped
+ */
+struct efx_channel_type {
+ void (*handle_no_channel)(struct efx_nic *);
+ int (*pre_probe)(struct efx_channel *);
+ void (*get_name)(struct efx_channel *, char *buf, size_t len);
+ struct efx_channel *(*copy)(const struct efx_channel *);
+ bool keep_eventq;
+};
+
enum efx_led_mode {
EFX_LED_OFF = 0,
EFX_LED_ON = 1,
@@ -395,12 +411,12 @@ enum efx_led_mode {
#define STRING_TABLE_LOOKUP(val, member) \
((val) < member ## _max) ? member ## _names[val] : "(invalid)"
-extern const char *efx_loopback_mode_names[];
+extern const char *const efx_loopback_mode_names[];
extern const unsigned int efx_loopback_mode_max;
#define LOOPBACK_MODE(efx) \
STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
-extern const char *efx_reset_type_names[];
+extern const char *const efx_reset_type_names[];
extern const unsigned int efx_reset_type_max;
#define RESET_TYPE(type) \
STRING_TABLE_LOOKUP(type, efx_reset_type)
@@ -474,18 +490,6 @@ static inline bool efx_link_state_equal(const struct efx_link_state *left,
}
/**
- * struct efx_mac_operations - Efx MAC operations table
- * @reconfigure: Reconfigure MAC. Serialised by the mac_lock
- * @update_stats: Update statistics
- * @check_fault: Check fault state. True if fault present.
- */
-struct efx_mac_operations {
- int (*reconfigure) (struct efx_nic *efx);
- void (*update_stats) (struct efx_nic *efx);
- bool (*check_fault)(struct efx_nic *efx);
-};
-
-/**
* struct efx_phy_operations - Efx PHY operations table
* @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
* efx->loopback_modes.
@@ -552,64 +556,64 @@ struct efx_mac_stats {
u64 tx_bytes;
u64 tx_good_bytes;
u64 tx_bad_bytes;
- unsigned long tx_packets;
- unsigned long tx_bad;
- unsigned long tx_pause;
- unsigned long tx_control;
- unsigned long tx_unicast;
- unsigned long tx_multicast;
- unsigned long tx_broadcast;
- unsigned long tx_lt64;
- unsigned long tx_64;
- unsigned long tx_65_to_127;
- unsigned long tx_128_to_255;
- unsigned long tx_256_to_511;
- unsigned long tx_512_to_1023;
- unsigned long tx_1024_to_15xx;
- unsigned long tx_15xx_to_jumbo;
- unsigned long tx_gtjumbo;
- unsigned long tx_collision;
- unsigned long tx_single_collision;
- unsigned long tx_multiple_collision;
- unsigned long tx_excessive_collision;
- unsigned long tx_deferred;
- unsigned long tx_late_collision;
- unsigned long tx_excessive_deferred;
- unsigned long tx_non_tcpudp;
- unsigned long tx_mac_src_error;
- unsigned long tx_ip_src_error;
+ u64 tx_packets;
+ u64 tx_bad;
+ u64 tx_pause;
+ u64 tx_control;
+ u64 tx_unicast;
+ u64 tx_multicast;
+ u64 tx_broadcast;
+ u64 tx_lt64;
+ u64 tx_64;
+ u64 tx_65_to_127;
+ u64 tx_128_to_255;
+ u64 tx_256_to_511;
+ u64 tx_512_to_1023;
+ u64 tx_1024_to_15xx;
+ u64 tx_15xx_to_jumbo;
+ u64 tx_gtjumbo;
+ u64 tx_collision;
+ u64 tx_single_collision;
+ u64 tx_multiple_collision;
+ u64 tx_excessive_collision;
+ u64 tx_deferred;
+ u64 tx_late_collision;
+ u64 tx_excessive_deferred;
+ u64 tx_non_tcpudp;
+ u64 tx_mac_src_error;
+ u64 tx_ip_src_error;
u64 rx_bytes;
u64 rx_good_bytes;
u64 rx_bad_bytes;
- unsigned long rx_packets;
- unsigned long rx_good;
- unsigned long rx_bad;
- unsigned long rx_pause;
- unsigned long rx_control;
- unsigned long rx_unicast;
- unsigned long rx_multicast;
- unsigned long rx_broadcast;
- unsigned long rx_lt64;
- unsigned long rx_64;
- unsigned long rx_65_to_127;
- unsigned long rx_128_to_255;
- unsigned long rx_256_to_511;
- unsigned long rx_512_to_1023;
- unsigned long rx_1024_to_15xx;
- unsigned long rx_15xx_to_jumbo;
- unsigned long rx_gtjumbo;
- unsigned long rx_bad_lt64;
- unsigned long rx_bad_64_to_15xx;
- unsigned long rx_bad_15xx_to_jumbo;
- unsigned long rx_bad_gtjumbo;
- unsigned long rx_overflow;
- unsigned long rx_missed;
- unsigned long rx_false_carrier;
- unsigned long rx_symbol_error;
- unsigned long rx_align_error;
- unsigned long rx_length_error;
- unsigned long rx_internal_error;
- unsigned long rx_good_lt64;
+ u64 rx_packets;
+ u64 rx_good;
+ u64 rx_bad;
+ u64 rx_pause;
+ u64 rx_control;
+ u64 rx_unicast;
+ u64 rx_multicast;
+ u64 rx_broadcast;
+ u64 rx_lt64;
+ u64 rx_64;
+ u64 rx_65_to_127;
+ u64 rx_128_to_255;
+ u64 rx_256_to_511;
+ u64 rx_512_to_1023;
+ u64 rx_1024_to_15xx;
+ u64 rx_15xx_to_jumbo;
+ u64 rx_gtjumbo;
+ u64 rx_bad_lt64;
+ u64 rx_bad_64_to_15xx;
+ u64 rx_bad_15xx_to_jumbo;
+ u64 rx_bad_gtjumbo;
+ u64 rx_overflow;
+ u64 rx_missed;
+ u64 rx_false_carrier;
+ u64 rx_symbol_error;
+ u64 rx_align_error;
+ u64 rx_length_error;
+ u64 rx_internal_error;
+ u64 rx_good_lt64;
};
/* Number of bits used in a multicast filter hash address */
@@ -625,6 +629,8 @@ union efx_multicast_hash {
};
struct efx_filter_state;
+struct efx_vf;
+struct vfdi_status;
/**
* struct efx_nic - an Efx NIC
@@ -640,6 +646,7 @@ struct efx_filter_state;
* @membase_phys: Memory BAR value as physical address
* @membase: Memory BAR value
* @interrupt_mode: Interrupt mode
+ * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
* @irq_rx_moderation: IRQ moderation time for RX event queues
* @msg_enable: Log message enable flags
@@ -649,8 +656,13 @@ struct efx_filter_state;
* @rx_queue: RX DMA queues
* @channel: Channels
* @channel_name: Names for channels and their IRQs
+ * @extra_channel_types: Types of extra (non-traffic) channels that
+ * should be allocated for this NIC
* @rxq_entries: Size of receive queues requested by user.
* @txq_entries: Size of transmit queues requested by user.
+ * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
+ * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
+ * @sram_lim_qw: Qword address limit of SRAM
* @next_buffer_table: First available buffer table id
* @n_channels: Number of channels in use
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
@@ -663,7 +675,8 @@ struct efx_filter_state;
* @int_error_expire: Time at which error count will be expired
* @irq_status: Interrupt status buffer
* @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
- * @fatal_irq_level: IRQ level (bit number) used for serious errors
+ * @irq_level: IRQ level/index for IRQs not triggered by an event queue
+ * @selftest_work: Work item for asynchronous self-test
* @mtd_list: List of MTDs attached to the NIC
* @nic_data: Hardware dependent state
* @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
@@ -676,7 +689,6 @@ struct efx_filter_state;
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
* @stats_buffer: DMA buffer for statistics
- * @mac_op: MAC interface
* @phy_type: PHY type
* @phy_op: PHY interface
* @phy_data: PHY private data (including PHY-specific stats)
@@ -689,21 +701,42 @@ struct efx_filter_state;
* @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
* @multicast_hash: Multicast hash table
* @wanted_fc: Wanted flow control flags
+ * @fc_disable: When non-zero, flow control is disabled. Typically used to
+ * ensure that network back pressure doesn't delay dma queue flushes.
+ * Serialised by the rtnl lock.
* @mac_work: Work item for changing MAC promiscuity and multicast hash
* @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
+ * @drain_pending: Count of RX and TX queues that haven't been flushed and drained.
+ * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
+ * Decremented when the efx_flush_rx_queue() is called.
+ * @rxq_flush_outstanding: Count of number of RX flushes started but not yet
+ * completed (either success or failure). Not used when MCDI is used to
+ * flush receive queues.
+ * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
+ * @vf: Array of &struct efx_vf objects.
+ * @vf_count: Number of VFs intended to be enabled.
+ * @vf_init_count: Number of VFs that have been fully initialised.
+ * @vi_scale: log2 number of vnics per VF.
+ * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
+ * @vfdi_status: Common VFDI status page to be DMAed to VF address space.
+ * @local_addr_list: List of local addresses. Protected by %local_lock.
+ * @local_page_list: List of DMA addressable pages used to broadcast
+ * %local_addr_list. Protected by %local_lock.
+ * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
+ * @peer_work: Work item to broadcast peer addresses to VMs.
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
- * @last_irq_cpu: Last CPU to handle interrupt.
- * This register is written with the SMP processor ID whenever an
- * interrupt is handled. It is used by efx_nic_test_interrupt()
- * to verify that an interrupt has occurred.
+ * @last_irq_cpu: Last CPU to handle a possible test interrupt. This
+ * field is used by efx_test_interrupts() to verify that an
+ * interrupt has occurred.
* @n_rx_nodesc_drop_cnt: RX no descriptor drop count
* @mac_stats: MAC statistics. These include all statistics the MACs
* can provide. Generic code converts these into a standard
* &struct net_device_stats.
* @stats_lock: Statistics update lock. Serialises statistics fetches
+ * and access to @mac_stats.
*
* This is stored in the private area of the &struct net_device.
*/
@@ -722,6 +755,7 @@ struct efx_nic {
void __iomem *membase;
enum efx_int_mode interrupt_mode;
+ unsigned int timer_quantum_ns;
bool irq_rx_adaptive;
unsigned int irq_rx_moderation;
u32 msg_enable;
@@ -731,12 +765,18 @@ struct efx_nic {
struct efx_channel *channel[EFX_MAX_CHANNELS];
char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
+ const struct efx_channel_type *
+ extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
unsigned rxq_entries;
unsigned txq_entries;
+ unsigned tx_dc_base;
+ unsigned rx_dc_base;
+ unsigned sram_lim_qw;
unsigned next_buffer_table;
unsigned n_channels;
unsigned n_rx_channels;
+ unsigned rss_spread;
unsigned tx_channel_offset;
unsigned n_tx_channels;
unsigned int rx_buffer_len;
@@ -749,7 +789,8 @@ struct efx_nic {
struct efx_buffer irq_status;
unsigned irq_zero_count;
- unsigned fatal_irq_level;
+ unsigned irq_level;
+ struct delayed_work selftest_work;
#ifdef CONFIG_SFC_MTD
struct list_head mtd_list;
@@ -766,8 +807,6 @@ struct efx_nic {
struct efx_buffer stats_buffer;
- const struct efx_mac_operations *mac_op;
-
unsigned int phy_type;
const struct efx_phy_operations *phy_op;
void *phy_data;
@@ -782,6 +821,7 @@ struct efx_nic {
bool promiscuous;
union efx_multicast_hash multicast_hash;
u8 wanted_fc;
+ unsigned fc_disable;
atomic_t rx_reset;
enum efx_loopback_mode loopback_mode;
@@ -791,11 +831,30 @@ struct efx_nic {
struct efx_filter_state *filter_state;
+ atomic_t drain_pending;
+ atomic_t rxq_flush_pending;
+ atomic_t rxq_flush_outstanding;
+ wait_queue_head_t flush_wq;
+
+#ifdef CONFIG_SFC_SRIOV
+ struct efx_channel *vfdi_channel;
+ struct efx_vf *vf;
+ unsigned vf_count;
+ unsigned vf_init_count;
+ unsigned vi_scale;
+ unsigned vf_buftbl_base;
+ struct efx_buffer vfdi_status;
+ struct list_head local_addr_list;
+ struct list_head local_page_list;
+ struct mutex local_lock;
+ struct work_struct peer_work;
+#endif
+
/* The following fields may be written more often */
struct delayed_work monitor_work ____cacheline_aligned_in_smp;
spinlock_t biu_lock;
- volatile signed int last_irq_cpu;
+ int last_irq_cpu;
unsigned n_rx_nodesc_drop_cnt;
struct efx_mac_stats mac_stats;
spinlock_t stats_lock;
@@ -806,15 +865,6 @@ static inline int efx_dev_registered(struct efx_nic *efx)
return efx->net_dev->reg_state == NETREG_REGISTERED;
}
-/* Net device name, for inclusion in log messages if it has been registered.
- * Use efx->name not efx->net_dev->name so that races with (un)registration
- * are harmless.
- */
-static inline const char *efx_dev_name(struct efx_nic *efx)
-{
- return efx_dev_registered(efx) ? efx->name : "";
-}
-
static inline unsigned int efx_port_num(struct efx_nic *efx)
{
return efx->net_dev->dev_id;
@@ -825,6 +875,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @probe: Probe the controller
* @remove: Free resources allocated by probe()
* @init: Initialise the controller
+ * @dimension_resources: Dimension controller resources (buffer table and
+ *	VIs) once the available interrupt resources are known
* @fini: Shut down the controller
* @monitor: Periodic function for polling link state and hardware monitor
* @map_reset_reason: Map ethtool reset reason to a reset method
@@ -840,14 +892,15 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @stop_stats: Stop the regular fetching of statistics
* @set_id_led: Set state of identifying LED or revert to automatic function
* @push_irq_moderation: Apply interrupt moderation value
- * @push_multicast_hash: Apply multicast hash table
* @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
+ * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
+ * to the hardware. Serialised by the mac_lock.
+ * @check_mac_fault: Check MAC fault state. True if fault present.
* @get_wol: Get WoL configuration from driver state
* @set_wol: Push WoL configuration to the NIC
* @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
* @test_registers: Test read/write functionality of control registers
* @test_nvram: Test validity of NVRAM contents
- * @default_mac_ops: efx_mac_operations to set at startup
* @revision: Hardware architecture revision
* @mem_map_size: Memory BAR mapped size
* @txd_ptr_tbl_base: TX descriptor ring base address
@@ -862,8 +915,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* from &enum efx_init_mode.
* @phys_addr_channels: Number of channels with physically addressed
* descriptors
- * @tx_dc_base: Base address in SRAM of TX queue descriptor caches
- * @rx_dc_base: Base address in SRAM of RX queue descriptor caches
+ * @timer_period_max: Maximum period of interrupt timer (in ticks)
* @offload_features: net_device feature flags for protocol offload
* features implemented in hardware
*/
@@ -871,6 +923,7 @@ struct efx_nic_type {
int (*probe)(struct efx_nic *efx);
void (*remove)(struct efx_nic *efx);
int (*init)(struct efx_nic *efx);
+ void (*dimension_resources)(struct efx_nic *efx);
void (*fini)(struct efx_nic *efx);
void (*monitor)(struct efx_nic *efx);
enum reset_type (*map_reset_reason)(enum reset_type reason);
@@ -885,14 +938,14 @@ struct efx_nic_type {
void (*stop_stats)(struct efx_nic *efx);
void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
void (*push_irq_moderation)(struct efx_channel *channel);
- void (*push_multicast_hash)(struct efx_nic *efx);
int (*reconfigure_port)(struct efx_nic *efx);
+ int (*reconfigure_mac)(struct efx_nic *efx);
+ bool (*check_mac_fault)(struct efx_nic *efx);
void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
int (*set_wol)(struct efx_nic *efx, u32 type);
void (*resume_wol)(struct efx_nic *efx);
int (*test_registers)(struct efx_nic *efx);
int (*test_nvram)(struct efx_nic *efx);
- const struct efx_mac_operations *default_mac_ops;
int revision;
unsigned int mem_map_size;
@@ -906,8 +959,7 @@ struct efx_nic_type {
unsigned int rx_buffer_padding;
unsigned int max_interrupt_mode;
unsigned int phys_addr_channels;
- unsigned int tx_dc_base;
- unsigned int rx_dc_base;
+ unsigned int timer_period_max;
netdev_features_t offload_features;
};
@@ -931,6 +983,13 @@ efx_get_channel(struct efx_nic *efx, unsigned index)
_channel = (_channel->channel + 1 < (_efx)->n_channels) ? \
(_efx)->channel[_channel->channel + 1] : NULL)
+/* Iterate over all used channels in reverse */
+#define efx_for_each_channel_rev(_channel, _efx) \
+ for (_channel = (_efx)->channel[(_efx)->n_channels - 1]; \
+ _channel; \
+ _channel = _channel->channel ? \
+ (_efx)->channel[_channel->channel - 1] : NULL)
+
static inline struct efx_tx_queue *
efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
{
@@ -971,16 +1030,12 @@ static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
/* Iterate over all possible TX queues belonging to a channel */
#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \
- for (_tx_queue = (_channel)->tx_queue; \
- _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
- _tx_queue++)
-
-static inline struct efx_rx_queue *
-efx_get_rx_queue(struct efx_nic *efx, unsigned index)
-{
- EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels);
- return &efx->channel[index]->rx_queue;
-}
+ if (!efx_channel_has_tx_queues(_channel)) \
+ ; \
+ else \
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+ _tx_queue++)
static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
{
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 3edfbaf5f022..4a9a5beec8fc 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -49,24 +49,29 @@
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5
-/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
- */
-#define EFX_FLUSH_INTERVAL 10
-#define EFX_FLUSH_POLL_COUNT 100
-
-/* Size and alignment of special buffers (4KB) */
-#define EFX_BUF_SIZE 4096
-
/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4
-/* Generated event code for efx_generate_test_event() */
-#define EFX_CHANNEL_MAGIC_TEST(_channel) \
- (0x00010100 + (_channel)->channel)
-
-/* Generated event code for efx_generate_fill_event() */
-#define EFX_CHANNEL_MAGIC_FILL(_channel) \
- (0x00010200 + (_channel)->channel)
+/* Driver generated events */
+#define _EFX_CHANNEL_MAGIC_TEST 0x000101
+#define _EFX_CHANNEL_MAGIC_FILL 0x000102
+#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
+#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
+
+#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
+#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
+
+#define EFX_CHANNEL_MAGIC_TEST(_channel) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
+#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
+ efx_rx_queue_index(_rx_queue))
+#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
+ efx_rx_queue_index(_rx_queue))
+#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
+ (_tx_queue)->queue)
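
As an aside, the new scheme packs an 8-bit queue/channel index under an event code, so efx_handle_generated_event() can classify any driver-generated event with a single decode. A stand-alone sketch that mirrors the macros above (illustration only, not part of the patch):

#include <stdio.h>

/* User-space mirror of the encode/decode macros above. */
#define MAGIC_TX_DRAIN		0x000104
#define MAGIC(code, data)	(((code) << 8) | (data))
#define MAGIC_CODE(magic)	((magic) >> 8)

int main(void)
{
	unsigned int magic = MAGIC(MAGIC_TX_DRAIN, 5);	/* TX queue 5 */

	/* Prints: magic=0x10405 code=0x104 data=5 */
	printf("magic=%#x code=%#x data=%u\n",
	       magic, MAGIC_CODE(magic), magic & 0xff);
	return 0;
}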
/**************************************************************************
*
@@ -187,7 +192,7 @@ static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
efx_qword_t buf_desc;
- int index;
+ unsigned int index;
dma_addr_t dma_addr;
int i;
@@ -196,7 +201,7 @@ efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
/* Write buffer descriptors to NIC */
for (i = 0; i < buffer->entries; i++) {
index = buffer->index + i;
- dma_addr = buffer->dma_addr + (i * 4096);
+ dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
netif_dbg(efx, probe, efx->net_dev,
"mapping special buffer %d at %llx\n",
index, (unsigned long long)dma_addr);
@@ -259,6 +264,10 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
/* Select new buffer ID */
buffer->index = efx->next_buffer_table;
efx->next_buffer_table += buffer->entries;
+#ifdef CONFIG_SFC_SRIOV
+ BUG_ON(efx_sriov_enabled(efx) &&
+ efx->vf_buftbl_base < efx->next_buffer_table);
+#endif
netif_dbg(efx, probe, efx->net_dev,
"allocating special buffers %d-%d at %llx+%x "
@@ -430,8 +439,6 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
struct efx_nic *efx = tx_queue->efx;
efx_oword_t reg;
- tx_queue->flushed = FLUSH_NONE;
-
/* Pin TX descriptor ring */
efx_init_special_buffer(efx, &tx_queue->txd);
@@ -488,9 +495,6 @@ static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
struct efx_nic *efx = tx_queue->efx;
efx_oword_t tx_flush_descq;
- tx_queue->flushed = FLUSH_PENDING;
-
- /* Post a flush command */
EFX_POPULATE_OWORD_2(tx_flush_descq,
FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
@@ -502,9 +506,6 @@ void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
struct efx_nic *efx = tx_queue->efx;
efx_oword_t tx_desc_ptr;
- /* The queue should have been flushed */
- WARN_ON(tx_queue->flushed != FLUSH_DONE);
-
/* Remove TX descriptor ring from card */
EFX_ZERO_OWORD(tx_desc_ptr);
efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
@@ -595,8 +596,6 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
rx_queue->rxd.index + rx_queue->rxd.entries - 1);
- rx_queue->flushed = FLUSH_NONE;
-
/* Pin RX descriptor ring */
efx_init_special_buffer(efx, &rx_queue->rxd);
@@ -625,9 +624,6 @@ static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
struct efx_nic *efx = rx_queue->efx;
efx_oword_t rx_flush_descq;
- rx_queue->flushed = FLUSH_PENDING;
-
- /* Post a flush command */
EFX_POPULATE_OWORD_2(rx_flush_descq,
FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
FRF_AZ_RX_FLUSH_DESCQ,
@@ -640,9 +636,6 @@ void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
efx_oword_t rx_desc_ptr;
struct efx_nic *efx = rx_queue->efx;
- /* The queue should already have been flushed */
- WARN_ON(rx_queue->flushed != FLUSH_DONE);
-
/* Remove RX descriptor ring from card */
EFX_ZERO_OWORD(rx_desc_ptr);
efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
@@ -660,6 +653,103 @@ void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
/**************************************************************************
*
+ * Flush handling
+ *
+ **************************************************************************/
+
+/* The wait in efx_nic_flush_queues() must be woken when all flushes have
+ * completed, or when more RX flushes can be kicked off.
+ */
+static bool efx_flush_wake(struct efx_nic *efx)
+{
+ /* Ensure that all updates are visible to efx_nic_flush_queues() */
+ smp_mb();
+
+ return (atomic_read(&efx->drain_pending) == 0 ||
+ (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
+ && atomic_read(&efx->rxq_flush_pending) > 0));
+}
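
A stand-alone model of the wake condition above (illustration only; the driver uses atomics, this just shows when the predicate fires): the waiter wakes when every queue has drained, or when an RX flush slot is free while RX flushes are still pending.

#include <stdbool.h>
#include <stdio.h>

#define EFX_RX_FLUSH_COUNT 4	/* as defined earlier in nic.c */

static bool flush_wake(int drain_pending, int rxq_pending, int rxq_outstanding)
{
	return drain_pending == 0 ||
	       (rxq_outstanding < EFX_RX_FLUSH_COUNT && rxq_pending > 0);
}

int main(void)
{
	printf("%d\n", flush_wake(0, 0, 0));	/* 1: everything drained */
	printf("%d\n", flush_wake(6, 2, 4));	/* 0: all flush slots busy */
	printf("%d\n", flush_wake(5, 2, 3));	/* 1: a slot free, work pending */
	return 0;
}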
+
+/* Flush all the transmit queues, and continue flushing receive queues until
+ * they're all flushed. Wait for the DRAIN events to be received so that there
+ * are no more RX and TX events left on any channel. */
+int efx_nic_flush_queues(struct efx_nic *efx)
+{
+ unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ int rc = 0;
+
+ efx->fc_disable++;
+ efx->type->prepare_flush(efx);
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ atomic_inc(&efx->drain_pending);
+ efx_flush_tx_queue(tx_queue);
+ }
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ atomic_inc(&efx->drain_pending);
+ rx_queue->flush_pending = true;
+ atomic_inc(&efx->rxq_flush_pending);
+ }
+ }
+
+ while (timeout && atomic_read(&efx->drain_pending) > 0) {
+ /* If SRIOV is enabled, then offload receive queue flushing to
+ * the firmware (though we will still have to poll for
+ * completion). If that fails, fall back to the old scheme.
+ */
+ if (efx_sriov_enabled(efx)) {
+ rc = efx_mcdi_flush_rxqs(efx);
+ if (!rc)
+ goto wait;
+ }
+
+ /* The hardware supports four concurrent rx flushes, each of
+ * which may need to be retried if there is an outstanding
+ * descriptor fetch
+ */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (atomic_read(&efx->rxq_flush_outstanding) >=
+ EFX_RX_FLUSH_COUNT)
+ break;
+
+ if (rx_queue->flush_pending) {
+ rx_queue->flush_pending = false;
+ atomic_dec(&efx->rxq_flush_pending);
+ atomic_inc(&efx->rxq_flush_outstanding);
+ efx_flush_rx_queue(rx_queue);
+ }
+ }
+ }
+
+ wait:
+ timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
+ timeout);
+ }
+
+ if (atomic_read(&efx->drain_pending)) {
+ netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
+ "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
+ atomic_read(&efx->rxq_flush_outstanding),
+ atomic_read(&efx->rxq_flush_pending));
+ rc = -ETIMEDOUT;
+
+ atomic_set(&efx->drain_pending, 0);
+ atomic_set(&efx->rxq_flush_pending, 0);
+ atomic_set(&efx->rxq_flush_outstanding, 0);
+ }
+
+ efx->fc_disable--;
+
+ return rc;
+}
+
+/**************************************************************************
+ *
* Event queue processing
* Event queues are processed by per-channel tasklets.
*
@@ -682,7 +772,8 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
}
/* Use HW to insert a SW defined event */
-static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
+void efx_generate_event(struct efx_nic *efx, unsigned int evq,
+ efx_qword_t *event)
{
efx_oword_t drv_ev_reg;
@@ -692,8 +783,18 @@ static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
drv_ev_reg.u32[1] = event->u32[1];
drv_ev_reg.u32[2] = 0;
drv_ev_reg.u32[3] = 0;
- EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
- efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
+ EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
+ efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
+}
+
+static void efx_magic_event(struct efx_channel *channel, u32 magic)
+{
+ efx_qword_t event;
+
+ EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
+ FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+ efx_generate_event(channel->efx, channel->channel, &event);
}
/* Handle a transmit completion event
@@ -710,6 +811,9 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
struct efx_nic *efx = channel->efx;
int tx_packets = 0;
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
/* Transmit completion */
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
@@ -718,7 +822,6 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
channel, tx_ev_q_label % EFX_TXQ_TYPES);
tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
tx_queue->ptr_mask);
- channel->irq_mod_score += tx_packets;
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
@@ -726,11 +829,9 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
tx_queue = efx_channel_get_tx_queue(
channel, tx_ev_q_label % EFX_TXQ_TYPES);
- if (efx_dev_registered(efx))
- netif_tx_lock(efx->net_dev);
+ netif_tx_lock(efx->net_dev);
efx_notify_tx_desc(tx_queue);
- if (efx_dev_registered(efx))
- netif_tx_unlock(efx->net_dev);
+ netif_tx_unlock(efx->net_dev);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
EFX_WORKAROUND_10727(efx)) {
efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
@@ -745,10 +846,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
-static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
- const efx_qword_t *event,
- bool *rx_ev_pkt_ok,
- bool *discard)
+static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
+ const efx_qword_t *event)
{
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_nic *efx = rx_queue->efx;
@@ -793,15 +892,11 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
++channel->n_rx_tcp_udp_chksum_err;
}
- /* The frame must be discarded if any of these are true. */
- *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
- rx_ev_tobe_disc | rx_ev_pause_frm);
-
/* TOBE_DISC is expected on unicast mismatches; don't print out an
* error message. FRM_TRUNC indicates RXDP dropped the packet due
* to a FIFO overflow.
*/
-#ifdef EFX_ENABLE_DEBUG
+#ifdef DEBUG
if (rx_ev_other_err && net_ratelimit()) {
netif_dbg(efx, rx_err, efx->net_dev,
" RX queue %d unexpected RX event "
@@ -819,6 +914,11 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
rx_ev_pause_frm ? " [PAUSE]" : "");
}
#endif
+
+ /* The frame must be discarded if any of these are true. */
+ return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
+ rx_ev_tobe_disc | rx_ev_pause_frm) ?
+ EFX_RX_PKT_DISCARD : 0;
}
/* Handle receive events that are not in-order. */
@@ -851,8 +951,13 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
unsigned expected_ptr;
- bool rx_ev_pkt_ok, discard = false, checksummed;
+ bool rx_ev_pkt_ok;
+ u16 flags;
struct efx_rx_queue *rx_queue;
+ struct efx_nic *efx = channel->efx;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return;
/* Basic packet information */
rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
@@ -874,12 +979,11 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
/* If packet is marked as OK and packet type is TCP/IP or
* UDP/IP, then we can rely on the hardware checksum.
*/
- checksummed =
- rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
- rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
+ flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
+ rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
+ EFX_RX_PKT_CSUMMED : 0;
} else {
- efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
- checksummed = false;
+ flags = efx_handle_rx_not_ok(rx_queue, event);
}
/* Detect multicast packets that didn't match the filter */
@@ -890,35 +994,111 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
if (unlikely(!rx_ev_mcast_hash_match)) {
++channel->n_rx_mcast_mismatch;
- discard = true;
+ flags |= EFX_RX_PKT_DISCARD;
}
}
channel->irq_mod_score += 2;
/* Handle received packet */
- efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
- checksummed, discard);
+ efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
+}
+
+/* If this flush done event corresponds to a &struct efx_tx_queue, then
+ * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
+ * of all transmit completions.
+ */
+static void
+efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+ struct efx_tx_queue *tx_queue;
+ int qid;
+
+ qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+ if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
+ tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
+ qid % EFX_TXQ_TYPES);
+
+ efx_magic_event(tx_queue->channel,
+ EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+ }
+}
+
+/* If this flush done event corresponds to a &struct efx_rx_queue: if the flush
+ * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
+ * the RX queue back to the mask of RX queues in need of flushing.
+ */
+static void
+efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ int qid;
+ bool failed;
+
+ qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+ if (qid >= efx->n_channels)
+ return;
+ channel = efx_get_channel(efx, qid);
+ if (!efx_channel_has_rx_queue(channel))
+ return;
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ if (failed) {
+ netif_info(efx, hw, efx->net_dev,
+ "RXQ %d flush retry\n", qid);
+ rx_queue->flush_pending = true;
+ atomic_inc(&efx->rxq_flush_pending);
+ } else {
+ efx_magic_event(efx_rx_queue_channel(rx_queue),
+ EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
+ }
+ atomic_dec(&efx->rxq_flush_outstanding);
+ if (efx_flush_wake(efx))
+ wake_up(&efx->flush_wq);
+}
+
+static void
+efx_handle_drain_event(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ WARN_ON(atomic_read(&efx->drain_pending) == 0);
+ atomic_dec(&efx->drain_pending);
+ if (efx_flush_wake(efx))
+ wake_up(&efx->flush_wq);
}
static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
- unsigned code;
+ struct efx_rx_queue *rx_queue =
+ efx_channel_has_rx_queue(channel) ?
+ efx_channel_get_rx_queue(channel) : NULL;
+ unsigned magic, code;
- code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
- if (code == EFX_CHANNEL_MAGIC_TEST(channel))
- ; /* ignore */
- else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
+ magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
+ code = _EFX_CHANNEL_MAGIC_CODE(magic);
+
+ if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
+ channel->event_test_cpu = raw_smp_processor_id();
+ } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
* queue. Refill it here */
- efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
- else
+ efx_fast_push_rx_descriptors(rx_queue);
+ } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
+ rx_queue->enabled = false;
+ efx_handle_drain_event(channel);
+ } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
+ efx_handle_drain_event(channel);
+ } else {
netif_dbg(efx, hw, efx->net_dev, "channel %d received "
"generated event "EFX_QWORD_FMT"\n",
channel->channel, EFX_QWORD_VAL(*event));
+ }
}
static void
@@ -935,10 +1115,14 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
channel->channel, ev_sub_data);
+ efx_handle_tx_flush_done(efx, event);
+ efx_sriov_tx_flush_done(efx, event);
break;
case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
channel->channel, ev_sub_data);
+ efx_handle_rx_flush_done(efx, event);
+ efx_sriov_rx_flush_done(efx, event);
break;
case FSE_AZ_EVQ_INIT_DONE_EV:
netif_dbg(efx, hw, efx->net_dev,
@@ -970,16 +1154,24 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
RESET_TYPE_DISABLE);
break;
case FSE_BZ_RX_DSC_ERROR_EV:
- netif_err(efx, rx_err, efx->net_dev,
- "RX DMA Q %d reports descriptor fetch error."
- " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
- efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
+ if (ev_sub_data < EFX_VI_BASE) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "RX DMA Q %d reports descriptor fetch error."
+ " RX Q %d is disabled.\n", ev_sub_data,
+ ev_sub_data);
+ efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
+ } else
+ efx_sriov_desc_fetch_err(efx, ev_sub_data);
break;
case FSE_BZ_TX_DSC_ERROR_EV:
- netif_err(efx, tx_err, efx->net_dev,
- "TX DMA Q %d reports descriptor fetch error."
- " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
- efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
+ if (ev_sub_data < EFX_VI_BASE) {
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX DMA Q %d reports descriptor fetch error."
+ " TX Q %d is disabled.\n", ev_sub_data,
+ ev_sub_data);
+ efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
+ } else
+ efx_sriov_desc_fetch_err(efx, ev_sub_data);
break;
default:
netif_vdbg(efx, hw, efx->net_dev,
@@ -1039,6 +1231,9 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
case FSE_AZ_EV_CODE_DRIVER_EV:
efx_handle_driver_event(channel, &event);
break;
+ case FSE_CZ_EV_CODE_USER_EV:
+ efx_sriov_event(channel, &event);
+ break;
case FSE_CZ_EV_CODE_MCDI_EV:
efx_mcdi_process_event(channel, &event);
break;
@@ -1137,163 +1332,17 @@ void efx_nic_remove_eventq(struct efx_channel *channel)
}
-void efx_nic_generate_test_event(struct efx_channel *channel)
+void efx_nic_event_test_start(struct efx_channel *channel)
{
- unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
- efx_qword_t test_event;
-
- EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
- FSE_AZ_EV_CODE_DRV_GEN_EV,
- FSF_AZ_DRV_GEN_EV_MAGIC, magic);
- efx_generate_event(channel, &test_event);
+ channel->event_test_cpu = -1;
+ smp_wmb();
+ efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}
-void efx_nic_generate_fill_event(struct efx_channel *channel)
+void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
- unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
- efx_qword_t test_event;
-
- EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
- FSE_AZ_EV_CODE_DRV_GEN_EV,
- FSF_AZ_DRV_GEN_EV_MAGIC, magic);
- efx_generate_event(channel, &test_event);
-}
-
-/**************************************************************************
- *
- * Flush handling
- *
- **************************************************************************/
-
-
-static void efx_poll_flush_events(struct efx_nic *efx)
-{
- struct efx_channel *channel = efx_get_channel(efx, 0);
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- unsigned int read_ptr = channel->eventq_read_ptr;
- unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
-
- do {
- efx_qword_t *event = efx_event(channel, read_ptr);
- int ev_code, ev_sub_code, ev_queue;
- bool ev_failed;
-
- if (!efx_event_present(event))
- break;
-
- ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
- ev_sub_code = EFX_QWORD_FIELD(*event,
- FSF_AZ_DRIVER_EV_SUBCODE);
- if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
- ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
- ev_queue = EFX_QWORD_FIELD(*event,
- FSF_AZ_DRIVER_EV_SUBDATA);
- if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
- tx_queue = efx_get_tx_queue(
- efx, ev_queue / EFX_TXQ_TYPES,
- ev_queue % EFX_TXQ_TYPES);
- tx_queue->flushed = FLUSH_DONE;
- }
- } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
- ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
- ev_queue = EFX_QWORD_FIELD(
- *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
- ev_failed = EFX_QWORD_FIELD(
- *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
- if (ev_queue < efx->n_rx_channels) {
- rx_queue = efx_get_rx_queue(efx, ev_queue);
- rx_queue->flushed =
- ev_failed ? FLUSH_FAILED : FLUSH_DONE;
- }
- }
-
- /* We're about to destroy the queue anyway, so
- * it's ok to throw away every non-flush event */
- EFX_SET_QWORD(*event);
-
- ++read_ptr;
- } while (read_ptr != end_ptr);
-
- channel->eventq_read_ptr = read_ptr;
-}
-
-/* Handle tx and rx flushes at the same time, since they run in
- * parallel in the hardware and there's no reason for us to
- * serialise them */
-int efx_nic_flush_queues(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
- int i, tx_pending, rx_pending;
-
- /* If necessary prepare the hardware for flushing */
- efx->type->prepare_flush(efx);
-
- /* Flush all tx queues in parallel */
- efx_for_each_channel(channel, efx) {
- efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
- if (tx_queue->initialised)
- efx_flush_tx_queue(tx_queue);
- }
- }
-
- /* The hardware supports four concurrent rx flushes, each of which may
- * need to be retried if there is an outstanding descriptor fetch */
- for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
- rx_pending = tx_pending = 0;
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- if (rx_queue->flushed == FLUSH_PENDING)
- ++rx_pending;
- }
- }
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- if (rx_pending == EFX_RX_FLUSH_COUNT)
- break;
- if (rx_queue->flushed == FLUSH_FAILED ||
- rx_queue->flushed == FLUSH_NONE) {
- efx_flush_rx_queue(rx_queue);
- ++rx_pending;
- }
- }
- efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
- if (tx_queue->initialised &&
- tx_queue->flushed != FLUSH_DONE)
- ++tx_pending;
- }
- }
-
- if (rx_pending == 0 && tx_pending == 0)
- return 0;
-
- msleep(EFX_FLUSH_INTERVAL);
- efx_poll_flush_events(efx);
- }
-
- /* Mark the queues as all flushed. We're going to return failure
- * leading to a reset, or fake up success anyway */
- efx_for_each_channel(channel, efx) {
- efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
- if (tx_queue->initialised &&
- tx_queue->flushed != FLUSH_DONE)
- netif_err(efx, hw, efx->net_dev,
- "tx queue %d flush command timed out\n",
- tx_queue->queue);
- tx_queue->flushed = FLUSH_DONE;
- }
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- if (rx_queue->flushed != FLUSH_DONE)
- netif_err(efx, hw, efx->net_dev,
- "rx queue %d flush command timed out\n",
- efx_rx_queue_index(rx_queue));
- rx_queue->flushed = FLUSH_DONE;
- }
- }
-
- return -ETIMEDOUT;
+ efx_magic_event(efx_rx_queue_channel(rx_queue),
+ EFX_CHANNEL_MAGIC_FILL(rx_queue));
}
/**************************************************************************
@@ -1311,7 +1360,7 @@ static inline void efx_nic_interrupts(struct efx_nic *efx,
efx_oword_t int_en_reg_ker;
EFX_POPULATE_OWORD_3(int_en_reg_ker,
- FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
+ FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
FRF_AZ_KER_INT_KER, force,
FRF_AZ_DRV_INT_EN_KER, enabled);
efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
@@ -1319,18 +1368,10 @@ static inline void efx_nic_interrupts(struct efx_nic *efx,
void efx_nic_enable_interrupts(struct efx_nic *efx)
{
- struct efx_channel *channel;
-
EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
- /* Enable interrupts */
efx_nic_interrupts(efx, true, false);
-
- /* Force processing of all the channels to get the EVQ RPTRs up to
- date */
- efx_for_each_channel(channel, efx)
- efx_schedule_channel(channel);
}
void efx_nic_disable_interrupts(struct efx_nic *efx)
@@ -1343,8 +1384,10 @@ void efx_nic_disable_interrupts(struct efx_nic *efx)
* Interrupt must already have been enabled, otherwise nasty things
* may happen.
*/
-void efx_nic_generate_interrupt(struct efx_nic *efx)
+void efx_nic_irq_test_start(struct efx_nic *efx)
{
+ efx->last_irq_cpu = -1;
+ smp_wmb();
efx_nic_interrupts(efx, true, true);
}
@@ -1427,11 +1470,12 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
efx_readd(efx, &reg, FR_BZ_INT_ISR0);
queues = EFX_EXTRACT_DWORD(reg, 0, 31);
- /* Check to see if we have a serious error condition */
- if (queues & (1U << efx->fatal_irq_level)) {
+ /* Handle non-event-queue sources */
+ if (queues & (1U << efx->irq_level)) {
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
return efx_nic_fatal_interrupt(efx);
+ efx->last_irq_cpu = raw_smp_processor_id();
}
if (queues != 0) {
@@ -1441,7 +1485,7 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
/* Schedule processing of any interrupting queues */
efx_for_each_channel(channel, efx) {
if (queues & 1)
- efx_schedule_channel(channel);
+ efx_schedule_channel_irq(channel);
queues >>= 1;
}
result = IRQ_HANDLED;
@@ -1458,18 +1502,16 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
efx_for_each_channel(channel, efx) {
event = efx_event(channel, channel->eventq_read_ptr);
if (efx_event_present(event))
- efx_schedule_channel(channel);
+ efx_schedule_channel_irq(channel);
else
efx_nic_eventq_read_ack(channel);
}
}
- if (result == IRQ_HANDLED) {
- efx->last_irq_cpu = raw_smp_processor_id();
+ if (result == IRQ_HANDLED)
netif_vdbg(efx, intr, efx->net_dev,
"IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
- }
return result;
}
@@ -1488,20 +1530,20 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
efx_oword_t *int_ker = efx->irq_status.addr;
int syserr;
- efx->last_irq_cpu = raw_smp_processor_id();
netif_vdbg(efx, intr, efx->net_dev,
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
- /* Check to see if we have a serious error condition */
- if (channel->channel == efx->fatal_irq_level) {
+ /* Handle non-event-queue sources */
+ if (channel->channel == efx->irq_level) {
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
return efx_nic_fatal_interrupt(efx);
+ efx->last_irq_cpu = raw_smp_processor_id();
}
/* Schedule processing of the channel */
- efx_schedule_channel(channel);
+ efx_schedule_channel_irq(channel);
return IRQ_HANDLED;
}
@@ -1598,6 +1640,58 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
free_irq(efx->legacy_irq, efx);
}
+/* Looks at available SRAM resources and works out how many queues we
+ * can support, and where things like descriptor caches should live.
+ *
+ * SRAM is split up as follows:
+ * 0 buftbl entries for channels
+ * efx->vf_buftbl_base buftbl entries for SR-IOV
+ * efx->rx_dc_base RX descriptor caches
+ * efx->tx_dc_base TX descriptor caches
+ */
+void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
+{
+ unsigned vi_count, buftbl_min;
+
+ /* Account for the buffer table entries backing the datapath channels
+ * and the descriptor caches for those channels.
+ */
+ buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
+ efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
+ efx->n_channels * EFX_MAX_EVQ_SIZE)
+ * sizeof(efx_qword_t) / EFX_BUF_SIZE);
+ vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+#ifdef CONFIG_SFC_SRIOV
+ if (efx_sriov_wanted(efx)) {
+ unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
+
+ efx->vf_buftbl_base = buftbl_min;
+
+ vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
+ vi_count = max(vi_count, EFX_VI_BASE);
+ buftbl_free = (sram_lim_qw - buftbl_min -
+ vi_count * vi_dc_entries);
+
+ entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
+ efx_vf_size(efx));
+ vf_limit = min(buftbl_free / entries_per_vf,
+ (1024U - EFX_VI_BASE) >> efx->vi_scale);
+
+ if (efx->vf_count > vf_limit) {
+ netif_err(efx, probe, efx->net_dev,
+				  "Reducing VF count from %d to %d\n",
+ efx->vf_count, vf_limit);
+ efx->vf_count = vf_limit;
+ }
+ vi_count += efx->vf_count * efx_vf_size(efx);
+ }
+#endif
+
+ efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
+ efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
+}
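
A worked example of the carve-up, with every number assumed purely for illustration (TX_DC_ENTRIES, RX_DC_ENTRIES and the SRAM limit are hardware- and configuration-dependent):

#include <stdio.h>

int main(void)
{
	/* Assumed values, for illustration only. */
	unsigned sram_lim_qw = 0x20000;	/* SRAM limit in qwords */
	unsigned vi_count    = 32;	/* VIs needing descriptor caches */
	unsigned tx_dc       = 16;	/* TX descriptor-cache entries per VI */
	unsigned rx_dc       = 64;	/* RX descriptor-cache entries per VI */

	/* Caches are carved from the top of SRAM; everything below
	 * rx_dc_base remains available for buffer table entries.
	 */
	unsigned tx_dc_base = sram_lim_qw - vi_count * tx_dc;
	unsigned rx_dc_base = tx_dc_base - vi_count * rx_dc;

	printf("tx_dc_base=%#x rx_dc_base=%#x\n", tx_dc_base, rx_dc_base);
	return 0;
}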
+
u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
efx_oword_t altera_build;
@@ -1610,11 +1704,9 @@ void efx_nic_init_common(struct efx_nic *efx)
efx_oword_t temp;
/* Set positions of descriptor caches in SRAM. */
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
- efx->type->tx_dc_base / 8);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
- efx->type->rx_dc_base / 8);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
/* Set TX descriptor cache size. */
@@ -1640,10 +1732,10 @@ void efx_nic_init_common(struct efx_nic *efx)
if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
/* Use an interrupt level unused by event queues */
- efx->fatal_irq_level = 0x1f;
+ efx->irq_level = 0x1f;
else
/* Use a valid MSI-X vector */
- efx->fatal_irq_level = 0;
+ efx->irq_level = 0;
/* Enable all the genuinely fatal interrupts. (They are still
* masked by the overall interrupt mask, controlled by
@@ -1837,7 +1929,7 @@ struct efx_nic_reg_table {
REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
step, rows \
}
-#define REGISTER_TABLE(name, min_rev, max_rev) \
+#define REGISTER_TABLE(name, min_rev, max_rev) \
REGISTER_TABLE_DIMENSIONS( \
name, FR_ ## min_rev ## max_rev ## _ ## name, \
min_rev, max_rev, \
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 5fb24d3aa3ca..f48ccf6bb3b9 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -35,10 +35,6 @@ static inline int efx_nic_rev(struct efx_nic *efx)
extern u32 efx_nic_fpga_ver(struct efx_nic *efx);
-static inline bool efx_nic_has_mc(struct efx_nic *efx)
-{
- return efx_nic_rev(efx) >= EFX_REV_SIENA_A0;
-}
/* NIC has two interlinked PCI functions for the same port. */
static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
{
@@ -65,11 +61,14 @@ enum {
#define FALCON_GMAC_LOOPBACKS \
(1 << LOOPBACK_GMAC)
+/* Alignment of PCIe DMA boundaries (4KB) */
+#define EFX_PAGE_SIZE 4096
+/* Size and alignment of buffer table entries (same) */
+#define EFX_BUF_SIZE EFX_PAGE_SIZE
+
/**
* struct falcon_board_type - board operations and type information
* @id: Board type id, as found in NVRAM
- * @ref_model: Model number of Solarflare reference design
- * @gen_type: Generic board type description
* @init: Allocate resources and initialise peripheral hardware
* @init_phy: Do board-specific PHY initialisation
* @fini: Shut down hardware and free resources
@@ -78,8 +77,6 @@ enum {
*/
struct falcon_board_type {
u8 id;
- const char *ref_model;
- const char *gen_type;
int (*init) (struct efx_nic *nic);
void (*init_phy) (struct efx_nic *efx);
void (*fini) (struct efx_nic *nic);
@@ -144,12 +141,115 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
* struct siena_nic_data - Siena NIC state
* @mcdi: Management-Controller-to-Driver Interface
* @wol_filter_id: Wake-on-LAN packet filter id
+ * @hwmon: Hardware monitor state
*/
struct siena_nic_data {
struct efx_mcdi_iface mcdi;
int wol_filter_id;
+#ifdef CONFIG_SFC_MCDI_MON
+ struct efx_mcdi_mon hwmon;
+#endif
};
+#ifdef CONFIG_SFC_MCDI_MON
+static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data;
+ EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
+ nic_data = efx->nic_data;
+ return &nic_data->hwmon;
+}
+#endif
+
+/*
+ * On the SFC9000 family each port is associated with 1 PCI physical
+ * function (PF) handled by sfc and a configurable number of virtual
+ * functions (VFs) that may be handled by some other driver, often in
+ * a VM guest. The queue pointer registers are mapped in both PF and
+ * VF BARs such that an 8K region provides access to a single RX, TX
+ * and event queue (collectively a Virtual Interface, VI or VNIC).
+ *
+ * The PF has access to all 1024 VIs while VFs are mapped to VIs
+ * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
+ * in the range [VI_BASE + (i << VI_SCALE), VI_BASE + ((i + 1) << VI_SCALE)).
+ * The number of VIs and the VI_SCALE value are configurable but must
+ * be established at boot time by firmware.
+ */
+
+/* Maximum VI_SCALE parameter supported by Siena */
+#define EFX_VI_SCALE_MAX 6
+/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
+ * so this is the smallest allowed value. */
+#define EFX_VI_BASE 128U
+/* Maximum number of VFs allowed */
+#define EFX_VF_COUNT_MAX 127
+/* Limit EVQs on VFs to 8k entries to reduce the buffer table reservation */
+#define EFX_MAX_VF_EVQ_SIZE 8192UL
+/* The number of buffer table entries reserved for each VI on a VF */
+#define EFX_VF_BUFTBL_PER_VI \
+ ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \
+ sizeof(efx_qword_t) / EFX_BUF_SIZE)
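
To make the VI_BASE/VI_SCALE mapping concrete, a stand-alone sketch computing the VI range a VF owns; the vi_scale and VF index are assumed values, only EFX_VI_BASE comes from the definitions above:

#include <stdio.h>

#define EFX_VI_BASE 128U	/* as defined above */

int main(void)
{
	unsigned vi_scale = 4;	/* assumed: 1 << 4 = 16 VIs per VF */
	unsigned vf = 3;	/* assumed VF index */

	unsigned first = EFX_VI_BASE + (vf << vi_scale);
	unsigned count = 1U << vi_scale;	/* what efx_vf_size() returns */

	/* With these assumptions VF 3 owns VIs [176, 192). */
	printf("VF %u: VIs [%u, %u)\n", vf, first, first + count);
	return 0;
}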
+
+#ifdef CONFIG_SFC_SRIOV
+
+static inline bool efx_sriov_wanted(struct efx_nic *efx)
+{
+ return efx->vf_count != 0;
+}
+static inline bool efx_sriov_enabled(struct efx_nic *efx)
+{
+ return efx->vf_init_count != 0;
+}
+static inline unsigned int efx_vf_size(struct efx_nic *efx)
+{
+ return 1 << efx->vi_scale;
+}
+
+extern int efx_init_sriov(void);
+extern void efx_sriov_probe(struct efx_nic *efx);
+extern int efx_sriov_init(struct efx_nic *efx);
+extern void efx_sriov_mac_address_changed(struct efx_nic *efx);
+extern void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+extern void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+extern void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+extern void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+extern void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
+extern void efx_sriov_reset(struct efx_nic *efx);
+extern void efx_sriov_fini(struct efx_nic *efx);
+extern void efx_fini_sriov(void);
+
+#else
+
+static inline bool efx_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline bool efx_sriov_enabled(struct efx_nic *efx) { return false; }
+static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; }
+
+static inline int efx_init_sriov(void) { return 0; }
+static inline void efx_sriov_probe(struct efx_nic *efx) {}
+static inline int efx_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_sriov_tx_flush_done(struct efx_nic *efx,
+ efx_qword_t *event) {}
+static inline void efx_sriov_rx_flush_done(struct efx_nic *efx,
+ efx_qword_t *event) {}
+static inline void efx_sriov_event(struct efx_channel *channel,
+ efx_qword_t *event) {}
+static inline void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) {}
+static inline void efx_sriov_flr(struct efx_nic *efx, unsigned flr) {}
+static inline void efx_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_sriov_fini(struct efx_nic *efx) {}
+static inline void efx_fini_sriov(void) {}
+
+#endif
+
+extern int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
+extern int efx_sriov_set_vf_vlan(struct net_device *dev, int vf,
+ u16 vlan, u8 qos);
+extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivf);
+extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
+ bool spoofchk);
+
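The four entry points above have the signatures expected by the matching net_device_ops callbacks of this kernel, so a driver could plug them in directly; a hypothetical wiring sketch (the ops table below is not taken from this patch):

static const struct net_device_ops example_netdev_ops = {
	/* ... usual ndo_open/ndo_start_xmit/etc. entries ... */
	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
	.ndo_get_vf_config	= efx_sriov_get_vf_config,
	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
};
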
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
extern const struct efx_nic_type siena_a0_nic_type;
@@ -176,6 +276,7 @@ extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue);
extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue);
extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue);
extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue);
+extern void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue);
/* Event data path */
extern int efx_nic_probe_eventq(struct efx_channel *channel);
@@ -189,21 +290,29 @@ extern bool efx_nic_event_present(struct efx_channel *channel);
/* MAC/PHY */
extern void falcon_drain_tx_fifo(struct efx_nic *efx);
extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
+extern bool falcon_xmac_check_fault(struct efx_nic *efx);
+extern int falcon_reconfigure_xmac(struct efx_nic *efx);
+extern void falcon_update_stats_xmac(struct efx_nic *efx);
/* Interrupts and test events */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
extern void efx_nic_enable_interrupts(struct efx_nic *efx);
-extern void efx_nic_generate_test_event(struct efx_channel *channel);
-extern void efx_nic_generate_fill_event(struct efx_channel *channel);
-extern void efx_nic_generate_interrupt(struct efx_nic *efx);
+extern void efx_nic_event_test_start(struct efx_channel *channel);
+extern void efx_nic_irq_test_start(struct efx_nic *efx);
extern void efx_nic_disable_interrupts(struct efx_nic *efx);
extern void efx_nic_fini_interrupt(struct efx_nic *efx);
extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
extern void falcon_irq_ack_a1(struct efx_nic *efx);
-#define EFX_IRQ_MOD_RESOLUTION 5
-#define EFX_IRQ_MOD_MAX 0x1000
+static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
+{
+ return ACCESS_ONCE(channel->event_test_cpu);
+}
+static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
+{
+ return ACCESS_ONCE(efx->last_irq_cpu);
+}
/* Global Resources */
extern int efx_nic_flush_queues(struct efx_nic *efx);
@@ -211,6 +320,8 @@ extern void falcon_start_nic_stats(struct efx_nic *efx);
extern void falcon_stop_nic_stats(struct efx_nic *efx);
extern void falcon_setup_xaui(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
+extern void
+efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
extern void efx_nic_init_common(struct efx_nic *efx);
extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
@@ -264,8 +375,8 @@ extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
#define MAC_DATA_LBN 0
#define MAC_DATA_WIDTH 32
-extern void efx_nic_generate_event(struct efx_channel *channel,
- efx_qword_t *event);
+extern void efx_generate_event(struct efx_nic *efx, unsigned int evq,
+ efx_qword_t *event);
extern void falcon_poll_xmac(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c
index 7ad97e397406..8a7caf88ffb6 100644
--- a/drivers/net/ethernet/sfc/qt202x_phy.c
+++ b/drivers/net/ethernet/sfc/qt202x_phy.c
@@ -47,7 +47,7 @@
#define PMA_PMD_FTX_STATIC_LBN 13
#define PMA_PMD_VEND1_REG 0xc001
#define PMA_PMD_VEND1_LBTXD_LBN 15
-#define PCS_VEND1_REG 0xc000
+#define PCS_VEND1_REG 0xc000
#define PCS_VEND1_LBTXD_LBN 5
void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
@@ -453,9 +453,9 @@ const struct efx_phy_operations falcon_qt202x_phy_ops = {
.probe = qt202x_phy_probe,
.init = qt202x_phy_init,
.reconfigure = qt202x_phy_reconfigure,
- .poll = qt202x_phy_poll,
+ .poll = qt202x_phy_poll,
.fini = efx_port_dummy_op_void,
- .remove = qt202x_phy_remove,
+ .remove = qt202x_phy_remove,
.get_settings = qt202x_phy_get_settings,
.set_settings = efx_mdio_set_settings,
.test_alive = efx_mdio_test_alive,
diff --git a/drivers/net/ethernet/sfc/regs.h b/drivers/net/ethernet/sfc/regs.h
index cc2c86b76a7b..ade4c4dc56ca 100644
--- a/drivers/net/ethernet/sfc/regs.h
+++ b/drivers/net/ethernet/sfc/regs.h
@@ -2446,8 +2446,8 @@
#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
-#define FRF_CZ_RMFT_DEST_MAC_LBN 16
-#define FRF_CZ_RMFT_DEST_MAC_WIDTH 44
+#define FRF_CZ_RMFT_DEST_MAC_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48
#define FRF_CZ_RMFT_VLAN_ID_LBN 0
#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
@@ -2523,8 +2523,8 @@
#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
-#define FRF_CZ_TMFT_SRC_MAC_LBN 16
-#define FRF_CZ_TMFT_SRC_MAC_WIDTH 44
+#define FRF_CZ_TMFT_SRC_MAC_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48
#define FRF_CZ_TMFT_VLAN_ID_LBN 0
#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
@@ -2895,17 +2895,17 @@
/* RX_MAC_FILTER_TBL0 */
/* RMFT_DEST_MAC is wider than 32 bits */
-#define FRF_CZ_RMFT_DEST_MAC_LO_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_LO_LBN FRF_CZ_RMFT_DEST_MAC_LBN
#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32
-#define FRF_CZ_RMFT_DEST_MAC_HI_LBN 44
-#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH 16
+#define FRF_CZ_RMFT_DEST_MAC_HI_LBN (FRF_CZ_RMFT_DEST_MAC_LBN + 32)
+#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH (FRF_CZ_RMFT_DEST_MAC_WIDTH - 32)
/* TX_MAC_FILTER_TBL0 */
/* TMFT_SRC_MAC is wider than 32 bits */
-#define FRF_CZ_TMFT_SRC_MAC_LO_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_LO_LBN FRF_CZ_TMFT_SRC_MAC_LBN
#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32
-#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
-#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
+#define FRF_CZ_TMFT_SRC_MAC_HI_LBN (FRF_CZ_TMFT_SRC_MAC_LBN + 32)
+#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH (FRF_CZ_TMFT_SRC_MAC_WIDTH - 32)
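
Worked out, the derived halves reproduce the numbers the old literals spelled by hand: DEST_MAC_LO_LBN = DEST_MAC_LBN = 12, DEST_MAC_HI_LBN = 12 + 32 = 44 and DEST_MAC_HI_WIDTH = 48 - 32 = 16 (likewise for the TX-side SRC_MAC halves); only the full-field LBN/WIDTH pair actually changes value, from 16/44 to the corrected 12/48.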
/* TX_PACE_TBL */
/* Values >20 are documented as reserved, but will result in a queue going
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index fc52fca74193..763fa2fe1a38 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -98,8 +98,8 @@ static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
/* Offset is always within one page, so we don't need to consider
* the page order.
*/
- return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
- efx->type->rx_buffer_hash_size);
+ return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
+ efx->type->rx_buffer_hash_size;
}
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
@@ -108,11 +108,10 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
- if (buf->is_page)
+ if (buf->flags & EFX_RX_BUF_PAGE)
return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
else
- return ((u8 *)buf->u.skb->data +
- efx->type->rx_buffer_hash_size);
+ return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
}
static inline u32 efx_rx_buf_hash(const u8 *eh)
@@ -122,10 +121,10 @@ static inline u32 efx_rx_buf_hash(const u8 *eh)
return __le32_to_cpup((const __le32 *)(eh - 4));
#else
const u8 *data = eh - 4;
- return ((u32)data[0] |
- (u32)data[1] << 8 |
- (u32)data[2] << 16 |
- (u32)data[3] << 24);
+ return (u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24;
#endif
}
@@ -159,7 +158,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
/* Adjust the SKB for padding */
skb_reserve(skb, NET_IP_ALIGN);
rx_buf->len = skb_len - NET_IP_ALIGN;
- rx_buf->is_page = false;
+ rx_buf->flags = 0;
rx_buf->dma_addr = pci_map_single(efx->pci_dev,
skb->data, rx_buf->len,
@@ -227,7 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
rx_buf->u.page = page;
rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
- rx_buf->is_page = true;
+ rx_buf->flags = EFX_RX_BUF_PAGE;
++rx_queue->added_count;
++rx_queue->alloc_page_count;
++state->refcnt;
@@ -248,7 +247,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
static void efx_unmap_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf)
{
- if (rx_buf->is_page && rx_buf->u.page) {
+ if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
struct efx_rx_page_state *state;
state = page_address(rx_buf->u.page);
@@ -258,7 +257,7 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
}
- } else if (!rx_buf->is_page && rx_buf->u.skb) {
+ } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
rx_buf->len, PCI_DMA_FROMDEVICE);
}
@@ -267,10 +266,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
static void efx_free_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf)
{
- if (rx_buf->is_page && rx_buf->u.page) {
+ if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
__free_pages(rx_buf->u.page, efx->rx_buffer_order);
rx_buf->u.page = NULL;
- } else if (!rx_buf->is_page && rx_buf->u.skb) {
+ } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
dev_kfree_skb_any(rx_buf->u.skb);
rx_buf->u.skb = NULL;
}
@@ -310,7 +309,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
new_buf->u.page = rx_buf->u.page;
new_buf->len = rx_buf->len;
- new_buf->is_page = true;
+ new_buf->flags = EFX_RX_BUF_PAGE;
++rx_queue->added_count;
}
@@ -324,7 +323,10 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
struct efx_rx_buffer *new_buf;
unsigned index;
- if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+ rx_buf->flags &= EFX_RX_BUF_PAGE;
+
+ if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
+ efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
page_count(rx_buf->u.page) == 1)
efx_resurrect_rx_buffer(rx_queue, rx_buf);
@@ -402,17 +404,15 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
void efx_rx_slow_fill(unsigned long context)
{
struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
- struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
/* Post an event to cause NAPI to run and refill the queue */
- efx_nic_generate_fill_event(channel);
+ efx_nic_generate_fill_event(rx_queue);
++rx_queue->slow_fill_count;
}
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf,
- int len, bool *discard,
- bool *leak_packet)
+ int len, bool *leak_packet)
{
struct efx_nic *efx = rx_queue->efx;
unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -423,7 +423,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
/* The packet must be discarded, but this is only a fatal error
* if the caller indicated it was
*/
- *discard = true;
+ rx_buf->flags |= EFX_RX_PKT_DISCARD;
if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
if (net_ratelimit())
@@ -436,7 +436,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
* data at the end of the skb will be trashed. So
* we have no choice but to leak the fragment.
*/
- *leak_packet = !rx_buf->is_page;
+ *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
} else {
if (net_ratelimit())
@@ -449,20 +449,17 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
-/* Pass a received packet up through the generic GRO stack
- *
- * Handles driverlink veto, and passes the fragment up via
- * the appropriate GRO method
+/* Pass a received packet up through GRO. GRO can handle pages
+ * regardless of checksum state and skbs with a good checksum.
*/
static void efx_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
- const u8 *eh, bool checksummed)
+ const u8 *eh)
{
struct napi_struct *napi = &channel->napi_str;
gro_result_t gro_result;
- /* Pass the skb/page into the GRO engine */
- if (rx_buf->is_page) {
+ if (rx_buf->flags & EFX_RX_BUF_PAGE) {
struct efx_nic *efx = channel->efx;
struct page *page = rx_buf->u.page;
struct sk_buff *skb;
@@ -484,8 +481,8 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
skb->len = rx_buf->len;
skb->data_len = rx_buf->len;
skb->truesize += rx_buf->len;
- skb->ip_summed =
- checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
+ skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
skb_record_rx_queue(skb, channel->channel);
@@ -493,7 +490,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
} else {
struct sk_buff *skb = rx_buf->u.skb;
- EFX_BUG_ON_PARANOID(!checksummed);
+ EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
rx_buf->u.skb = NULL;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -509,7 +506,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
}
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
- unsigned int len, bool checksummed, bool discard)
+ unsigned int len, u16 flags)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
@@ -517,6 +514,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
bool leak_packet = false;
rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->flags |= flags;
/* This allows the refill path to post another buffer.
* EFX_RXD_HEAD_ROOM ensures that the slot we are using
@@ -525,18 +523,17 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
rx_queue->removed_count++;
/* Validate the length encoded in the event vs the descriptor pushed */
- efx_rx_packet__check_len(rx_queue, rx_buf, len,
- &discard, &leak_packet);
+ efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);
netif_vdbg(efx, rx_status, efx->net_dev,
"RX queue %d received id %x at %llx+%x %s%s\n",
efx_rx_queue_index(rx_queue), index,
(unsigned long long)rx_buf->dma_addr, len,
- (checksummed ? " [SUMMED]" : ""),
- (discard ? " [DISCARD]" : ""));
+ (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
+ (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
/* Discard packet, if instructed to do so */
- if (unlikely(discard)) {
+ if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
if (unlikely(leak_packet))
channel->n_skbuff_leaks++;
else
@@ -563,18 +560,33 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
if (channel->rx_pkt)
- __efx_rx_packet(channel,
- channel->rx_pkt, channel->rx_pkt_csummed);
+ __efx_rx_packet(channel, channel->rx_pkt);
channel->rx_pkt = rx_buf;
- channel->rx_pkt_csummed = checksummed;
+}
+
+static void efx_rx_deliver(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct sk_buff *skb;
+
+ /* We now own the SKB */
+ skb = rx_buf->u.skb;
+ rx_buf->u.skb = NULL;
+
+ /* Set the SKB flags */
+ skb_checksum_none_assert(skb);
+
+ /* Pass the packet up */
+ netif_receive_skb(skb);
+
+ /* Update allocation strategy method */
+ channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}
/* Handle a received packet. Second half: Touches packet payload. */
-void __efx_rx_packet(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf, bool checksummed)
+void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
{
struct efx_nic *efx = channel->efx;
- struct sk_buff *skb;
u8 *eh = efx_rx_buf_eh(efx, rx_buf);
/* If we're in loopback test, then pass the packet directly to the
@@ -586,8 +598,8 @@ void __efx_rx_packet(struct efx_channel *channel,
return;
}
- if (!rx_buf->is_page) {
- skb = rx_buf->u.skb;
+ if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
+ struct sk_buff *skb = rx_buf->u.skb;
prefetch(skb_shinfo(skb));
@@ -605,25 +617,12 @@ void __efx_rx_packet(struct efx_channel *channel,
}
if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
- checksummed = false;
-
- if (likely(checksummed || rx_buf->is_page)) {
- efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
- return;
- }
+ rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
- /* We now own the SKB */
- skb = rx_buf->u.skb;
- rx_buf->u.skb = NULL;
-
- /* Set the SKB flags */
- skb_checksum_none_assert(skb);
-
- /* Pass the packet up */
- netif_receive_skb(skb);
-
- /* Update allocation strategy method */
- channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
+ if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)))
+ efx_rx_packet_gro(channel, rx_buf, eh);
+ else
+ efx_rx_deliver(channel, rx_buf);
}
void efx_rx_strategy(struct efx_channel *channel)
@@ -703,6 +702,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->fast_fill_limit = limit;
/* Set up RX descriptor ring */
+ rx_queue->enabled = true;
efx_nic_init_rx(rx_queue);
}
@@ -714,6 +714,9 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
+ /* A flush failure might have left rx_queue->enabled */
+ rx_queue->enabled = false;
+
del_timer_sync(&rx_queue->slow_fill);
efx_nic_fini_rx(rx_queue);
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 52edd24fcde3..de4c0069f5b2 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -19,13 +19,22 @@
#include <linux/udp.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
-#include <asm/io.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
+/* IRQ latency can be enormous because:
+ * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
+ * slow serial console or an old IDE driver doing error recovery
+ * - The PREEMPT_RT patches mostly deal with this, but also allow a
+ * tasklet or normal task to be given higher priority than our IRQ
+ * threads
+ * Try to avoid blaming the hardware for this.
+ */
+#define IRQ_TIMEOUT HZ
+
/*
* Loopback test packet structure
*
@@ -50,7 +59,7 @@ static const char payload_msg[] =
/* Interrupt mode names */
static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
-static const char *efx_interrupt_mode_names[] = {
+static const char *const efx_interrupt_mode_names[] = {
[EFX_INT_MODE_MSIX] = "MSI-X",
[EFX_INT_MODE_MSI] = "MSI",
[EFX_INT_MODE_LEGACY] = "legacy",
@@ -78,6 +87,9 @@ struct efx_loopback_state {
struct efx_loopback_payload payload;
};
+/* How long to wait for all the packets to arrive (in ms) */
+#define LOOPBACK_TIMEOUT_MS 1000
+
/**************************************************************************
*
* MII, NVRAM and register tests
@@ -131,87 +143,117 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
static int efx_test_interrupts(struct efx_nic *efx,
struct efx_self_tests *tests)
{
+ unsigned long timeout, wait;
+ int cpu;
+
netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
tests->interrupt = -1;
- /* Reset interrupt flag */
- efx->last_irq_cpu = -1;
- smp_wmb();
-
- efx_nic_generate_interrupt(efx);
+ efx_nic_irq_test_start(efx);
+ timeout = jiffies + IRQ_TIMEOUT;
+ wait = 1;
/* Wait for arrival of test interrupt. */
netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
- schedule_timeout_uninterruptible(HZ / 10);
- if (efx->last_irq_cpu >= 0)
- goto success;
+ do {
+ schedule_timeout_uninterruptible(wait);
+ cpu = efx_nic_irq_test_irq_cpu(efx);
+ if (cpu >= 0)
+ goto success;
+ wait *= 2;
+ } while (time_before(jiffies, timeout));
netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
return -ETIMEDOUT;
success:
netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
- INT_MODE(efx),
- efx->last_irq_cpu);
+ INT_MODE(efx), cpu);
tests->interrupt = 1;
return 0;
}
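The loop above replaces the old fixed 100 ms sleep with an exponential back-off bounded by IRQ_TIMEOUT, which keeps the common case fast while tolerating the multi-second IRQ latencies described in the comment next to the IRQ_TIMEOUT definition. A minimal user-space model of the same polling pattern (the condition, timings and names below are illustrative only, not taken from the driver):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Stand-in for "has the test interrupt been seen yet?" */
static bool condition_met(int polls)
{
        return polls >= 3;
}

int main(void)
{
        time_t deadline = time(NULL) + 5;   /* plays the role of IRQ_TIMEOUT */
        useconds_t wait_us = 1000;          /* start with a short sleep ... */
        int polls = 0;

        do {
                usleep(wait_us);
                if (condition_met(++polls)) {
                        printf("condition met after %d polls\n", polls);
                        return 0;
                }
                wait_us *= 2;               /* ... and double it each time */
        } while (time(NULL) < deadline);

        printf("timed out\n");
        return 1;
}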
/* Test generation and receipt of interrupting events */
-static int efx_test_eventq_irq(struct efx_channel *channel,
+static int efx_test_eventq_irq(struct efx_nic *efx,
struct efx_self_tests *tests)
{
- struct efx_nic *efx = channel->efx;
- unsigned int read_ptr, count;
+ struct efx_channel *channel;
+ unsigned int read_ptr[EFX_MAX_CHANNELS];
+ unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
+ unsigned long timeout, wait;
- tests->eventq_dma[channel->channel] = -1;
- tests->eventq_int[channel->channel] = -1;
- tests->eventq_poll[channel->channel] = -1;
+ BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);
- read_ptr = channel->eventq_read_ptr;
- channel->efx->last_irq_cpu = -1;
- smp_wmb();
+ efx_for_each_channel(channel, efx) {
+ read_ptr[channel->channel] = channel->eventq_read_ptr;
+ set_bit(channel->channel, &dma_pend);
+ set_bit(channel->channel, &int_pend);
+ efx_nic_event_test_start(channel);
+ }
- efx_nic_generate_test_event(channel);
+ timeout = jiffies + IRQ_TIMEOUT;
+ wait = 1;
- /* Wait for arrival of interrupt */
- count = 0;
+ /* Wait for arrival of interrupts. NAPI processing may or may
+ * not complete in time, but we can cope in any case.
+ */
do {
- schedule_timeout_uninterruptible(HZ / 100);
-
- if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
- goto eventq_ok;
- } while (++count < 2);
-
- netif_err(efx, drv, efx->net_dev,
- "channel %d timed out waiting for event queue\n",
- channel->channel);
+ schedule_timeout_uninterruptible(wait);
+
+ efx_for_each_channel(channel, efx) {
+ napi_disable(&channel->napi_str);
+ if (channel->eventq_read_ptr !=
+ read_ptr[channel->channel]) {
+ set_bit(channel->channel, &napi_ran);
+ clear_bit(channel->channel, &dma_pend);
+ clear_bit(channel->channel, &int_pend);
+ } else {
+ if (efx_nic_event_present(channel))
+ clear_bit(channel->channel, &dma_pend);
+ if (efx_nic_event_test_irq_cpu(channel) >= 0)
+ clear_bit(channel->channel, &int_pend);
+ }
+ napi_enable(&channel->napi_str);
+ efx_nic_eventq_read_ack(channel);
+ }
- /* See if interrupt arrived */
- if (channel->efx->last_irq_cpu >= 0) {
- netif_err(efx, drv, efx->net_dev,
- "channel %d saw interrupt on CPU%d "
- "during event queue test\n", channel->channel,
- raw_smp_processor_id());
- tests->eventq_int[channel->channel] = 1;
- }
+ wait *= 2;
+ } while ((dma_pend || int_pend) && time_before(jiffies, timeout));
- /* Check to see if event was received even if interrupt wasn't */
- if (efx_nic_event_present(channel)) {
- netif_err(efx, drv, efx->net_dev,
- "channel %d event was generated, but "
- "failed to trigger an interrupt\n", channel->channel);
- tests->eventq_dma[channel->channel] = 1;
+ efx_for_each_channel(channel, efx) {
+ bool dma_seen = !test_bit(channel->channel, &dma_pend);
+ bool int_seen = !test_bit(channel->channel, &int_pend);
+
+ tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
+ tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
+
+ if (dma_seen && int_seen) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "channel %d event queue passed (with%s NAPI)\n",
+ channel->channel,
+ test_bit(channel->channel, &napi_ran) ?
+ "" : "out");
+ } else {
+ /* Report failure and whether either interrupt or DMA
+ * worked
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d timed out waiting for event queue\n",
+ channel->channel);
+ if (int_seen)
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d saw interrupt "
+ "during event queue test\n",
+ channel->channel);
+ if (dma_seen)
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d event was generated, but "
+ "failed to trigger an interrupt\n",
+ channel->channel);
+ }
}
- return -ETIMEDOUT;
- eventq_ok:
- netif_dbg(efx, drv, efx->net_dev, "channel %d event queue passed\n",
- channel->channel);
- tests->eventq_dma[channel->channel] = 1;
- tests->eventq_int[channel->channel] = 1;
- tests->eventq_poll[channel->channel] = 1;
- return 0;
+ return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
}
static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
@@ -316,7 +358,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
return;
err:
-#ifdef EFX_ENABLE_DEBUG
+#ifdef DEBUG
if (atomic_read(&state->rx_bad) == 0) {
netif_err(efx, drv, efx->net_dev, "received packet:\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
@@ -395,11 +437,9 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
* interrupt handler. */
smp_wmb();
- if (efx_dev_registered(efx))
- netif_tx_lock_bh(efx->net_dev);
+ netif_tx_lock_bh(efx->net_dev);
rc = efx_enqueue_skb(tx_queue, skb);
- if (efx_dev_registered(efx))
- netif_tx_unlock_bh(efx->net_dev);
+ netif_tx_unlock_bh(efx->net_dev);
if (rc != NETDEV_TX_OK) {
netif_err(efx, drv, efx->net_dev,
@@ -440,20 +480,18 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
int tx_done = 0, rx_good, rx_bad;
int i, rc = 0;
- if (efx_dev_registered(efx))
- netif_tx_lock_bh(efx->net_dev);
+ netif_tx_lock_bh(efx->net_dev);
/* Count the number of tx completions, and decrement the refcnt. Any
* skbs not already completed will be free'd when the queue is flushed */
- for (i=0; i < state->packet_count; i++) {
+ for (i = 0; i < state->packet_count; i++) {
skb = state->skbs[i];
if (skb && !skb_shared(skb))
++tx_done;
dev_kfree_skb_any(skb);
}
- if (efx_dev_registered(efx))
- netif_tx_unlock_bh(efx->net_dev);
+ netif_tx_unlock_bh(efx->net_dev);
/* Check TX completion and received packet counts */
rx_good = atomic_read(&state->rx_good);
@@ -518,10 +556,10 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
begin_rc = efx_begin_loopback(tx_queue);
/* This will normally complete very quickly, but be
- * prepared to wait up to 100 ms. */
+ * prepared to wait much longer. */
msleep(1);
if (!efx_poll_loopback(efx)) {
- msleep(100);
+ msleep(LOOPBACK_TIMEOUT_MS);
efx_poll_loopback(efx);
}
@@ -570,7 +608,7 @@ static int efx_wait_for_link(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
link_up = link_state->up;
if (link_up)
- link_up = !efx->mac_op->check_fault(efx);
+ link_up = !efx->type->check_mac_fault(efx);
mutex_unlock(&efx->mac_lock);
if (link_up) {
@@ -662,9 +700,10 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
enum efx_loopback_mode loopback_mode = efx->loopback_mode;
int phy_mode = efx->phy_mode;
enum reset_type reset_method = RESET_TYPE_INVISIBLE;
- struct efx_channel *channel;
int rc_test = 0, rc_reset = 0, rc;
+ efx_selftest_async_cancel(efx);
+
/* Online (i.e. non-disruptive) testing
* This checks interrupt generation, event delivery and PHY presence. */
@@ -680,11 +719,9 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
if (rc && !rc_test)
rc_test = rc;
- efx_for_each_channel(channel, efx) {
- rc = efx_test_eventq_irq(channel, tests);
- if (rc && !rc_test)
- rc_test = rc;
- }
+ rc = efx_test_eventq_irq(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
if (rc_test)
return rc_test;
@@ -759,3 +796,36 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
return rc_test;
}
+void efx_selftest_async_start(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_nic_event_test_start(channel);
+ schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
+}
+
+void efx_selftest_async_cancel(struct efx_nic *efx)
+{
+ cancel_delayed_work_sync(&efx->selftest_work);
+}
+
+void efx_selftest_async_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic,
+ selftest_work.work);
+ struct efx_channel *channel;
+ int cpu;
+
+ efx_for_each_channel(channel, efx) {
+ cpu = efx_nic_event_test_irq_cpu(channel);
+ if (cpu < 0)
+ netif_err(efx, ifup, efx->net_dev,
+ "channel %d failed to trigger an interrupt\n",
+ channel->channel);
+ else
+ netif_dbg(efx, ifup, efx->net_dev,
+ "channel %d triggered interrupt on CPU %d\n",
+ channel->channel, cpu);
+ }
+}
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index dba5456e70f3..aed24b736059 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -37,7 +37,6 @@ struct efx_self_tests {
int interrupt;
int eventq_dma[EFX_MAX_CHANNELS];
int eventq_int[EFX_MAX_CHANNELS];
- int eventq_poll[EFX_MAX_CHANNELS];
/* offline tests */
int registers;
int phy_ext[EFX_MAX_PHY_TESTS];
@@ -49,5 +48,8 @@ extern void efx_loopback_rx_packet(struct efx_nic *efx,
extern int efx_selftest(struct efx_nic *efx,
struct efx_self_tests *tests,
unsigned flags);
+extern void efx_selftest_async_start(struct efx_nic *efx);
+extern void efx_selftest_async_cancel(struct efx_nic *efx);
+extern void efx_selftest_async_work(struct work_struct *data);
#endif /* EFX_SELFTEST_H */
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 4d5d619feaa6..9f8d7cea3967 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -18,7 +18,6 @@
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
-#include "mac.h"
#include "spi.h"
#include "regs.h"
#include "io.h"
@@ -36,8 +35,6 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
{
efx_dword_t timer_cmd;
- BUILD_BUG_ON(EFX_IRQ_MOD_MAX > (1 << FRF_CZ_TC_TIMER_VAL_WIDTH));
-
if (channel->irq_moderation)
EFX_POPULATE_DWORD_2(timer_cmd,
FRF_CZ_TC_TIMER_MODE,
@@ -53,15 +50,6 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
channel->channel);
}
-static void siena_push_multicast_hash(struct efx_nic *efx)
-{
- WARN_ON(!mutex_is_locked(&efx->mac_lock));
-
- efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
- efx->multicast_hash.byte, sizeof(efx->multicast_hash),
- NULL, 0, NULL);
-}
-
static int siena_mdio_write(struct net_device *net_dev,
int prtad, int devad, u16 addr, u16 value)
{
@@ -226,7 +214,24 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
static int siena_probe_nvconfig(struct efx_nic *efx)
{
- return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL);
+ u32 caps = 0;
+ int rc;
+
+ rc = efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL, &caps);
+
+ efx->timer_quantum_ns =
+ (caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ?
+ 3072 : 6144; /* 768 cycles */
+ return rc;
+}
+
+static void siena_dimension_resources(struct efx_nic *efx)
+{
+ /* Each port has a small block of internal SRAM dedicated to
+ * the buffer table and descriptor caches. In theory we can
+ * map both blocks to one port, but we don't.
+ */
+ efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
}
static int siena_probe_nic(struct efx_nic *efx)
@@ -304,6 +309,12 @@ static int siena_probe_nic(struct efx_nic *efx)
goto fail5;
}
+ rc = efx_mcdi_mon_probe(efx);
+ if (rc)
+ goto fail5;
+
+ efx_sriov_probe(efx);
+
return 0;
fail5:
@@ -391,13 +402,14 @@ static int siena_init_nic(struct efx_nic *efx)
static void siena_remove_nic(struct efx_nic *efx)
{
+ efx_mcdi_mon_remove(efx);
+
efx_nic_free_buffer(efx, &efx->irq_status);
siena_reset_hw(efx, RESET_TYPE_ALL);
/* Relinquish the device back to the BMC */
- if (efx_nic_has_mc(efx))
- efx_mcdi_drv_attach(efx, false, NULL);
+ efx_mcdi_drv_attach(efx, false, NULL);
/* Tear down the private nic state */
kfree(efx->nic_data);
@@ -617,6 +629,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.probe = siena_probe_nic,
.remove = siena_remove_nic,
.init = siena_init_nic,
+ .dimension_resources = siena_dimension_resources,
.fini = efx_port_dummy_op_void,
.monitor = NULL,
.map_reset_reason = siena_map_reset_reason,
@@ -630,14 +643,14 @@ const struct efx_nic_type siena_a0_nic_type = {
.stop_stats = siena_stop_nic_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = siena_push_irq_moderation,
- .push_multicast_hash = siena_push_multicast_hash,
+ .reconfigure_mac = efx_mcdi_mac_reconfigure,
+ .check_mac_fault = efx_mcdi_mac_check_fault,
.reconfigure_port = efx_mcdi_phy_reconfigure,
.get_wol = siena_get_wol,
.set_wol = siena_set_wol,
.resume_wol = siena_init_wol,
.test_registers = siena_test_registers,
.test_nvram = efx_mcdi_nvram_test_all,
- .default_mac_ops = &efx_mcdi_mac_operations,
.revision = EFX_REV_SIENA_A0,
.mem_map_size = (FR_CZ_MC_TREG_SMEM +
@@ -654,8 +667,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
* interrupt handler only supports 32
* channels */
- .tx_dc_base = 0x88000,
- .rx_dc_base = 0x68000,
+ .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXHASH | NETIF_F_NTUPLE),
};
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
new file mode 100644
index 000000000000..9cb3b84ecae9
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -0,0 +1,1643 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2010-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "io.h"
+#include "mcdi.h"
+#include "filter.h"
+#include "mcdi_pcol.h"
+#include "regs.h"
+#include "vfdi.h"
+
+/* Number of longs required to track all the VIs in a VF */
+#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)
+
+/**
+ * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
+ * @VF_TX_FILTER_OFF: Disabled
+ * @VF_TX_FILTER_AUTO: Enabled if MAC address assigned to VF and only
+ * 2 TX queues allowed per VF.
+ * @VF_TX_FILTER_ON: Enabled
+ */
+enum efx_vf_tx_filter_mode {
+ VF_TX_FILTER_OFF,
+ VF_TX_FILTER_AUTO,
+ VF_TX_FILTER_ON,
+};
+
+/**
+ * struct efx_vf - Back-end resource and protocol state for a PCI VF
+ * @efx: The Efx NIC owning this VF
+ * @pci_rid: The PCI requester ID for this VF
+ * @pci_name: The PCI name (formatted address) of this VF
+ * @index: Index of VF within its port and PF.
+ * @req: VFDI incoming request work item. Incoming USR_EV events are received
+ * by the NAPI handler, but must be handled by executing MCDI requests
+ * inside a work item.
+ * @req_addr: VFDI incoming request DMA address (in VF's PCI address space).
+ * @req_type: Expected next incoming (from VF) %VFDI_EV_TYPE member.
+ * @req_seqno: Expected next incoming (from VF) %VFDI_EV_SEQ member.
+ * @msg_seqno: Next %VFDI_EV_SEQ member to reply to VF. Protected by
+ * @status_lock
+ * @busy: VFDI request queued to be processed or being processed. Receiving
+ * a VFDI request when @busy is set is an error condition.
+ * @buf: Incoming VFDI requests are DMA from the VF into this buffer.
+ * @buftbl_base: Buffer table entries for this VF start at this index.
+ * @rx_filtering: Receive filtering has been requested by the VF driver.
+ * @rx_filter_flags: The flags sent in the %VFDI_OP_INSERT_FILTER request.
+ * @rx_filter_qid: VF relative qid for RX filter requested by VF.
+ * @rx_filter_id: Receive MAC filter ID. Only one filter per VF is supported.
+ * @tx_filter_mode: Transmit MAC filtering mode.
+ * @tx_filter_id: Transmit MAC filter ID.
+ * @addr: The MAC address and outer vlan tag of the VF.
+ * @status_addr: VF DMA address of page for &struct vfdi_status updates.
+ * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr,
+ * @peer_page_addrs and @peer_page_count from simultaneous
+ * updates by the VM and consumption by
+ * efx_sriov_update_vf_addr()
+ * @peer_page_addrs: Pointer to an array of guest pages for local addresses.
+ * @peer_page_count: Number of entries in @peer_page_addrs.
+ * @evq0_addrs: Array of guest pages backing evq0.
+ * @evq0_count: Number of entries in @evq0_addrs.
+ * @flush_waitq: wait queue used by %VFDI_OP_FINI_ALL_QUEUES handler
+ * to wait for flush completions.
+ * @txq_lock: Mutex for TX queue allocation.
+ * @txq_mask: Mask of initialized transmit queues.
+ * @txq_count: Number of initialized transmit queues.
+ * @rxq_mask: Mask of initialized receive queues.
+ * @rxq_count: Number of initialized receive queues.
+ * @rxq_retry_mask: Mask of receive queues that need to be flushed again
+ * due to flush failure.
+ * @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
+ * @reset_work: Work item to schedule a VF reset.
+ */
+struct efx_vf {
+ struct efx_nic *efx;
+ unsigned int pci_rid;
+ char pci_name[13]; /* dddd:bb:dd.f */
+ unsigned int index;
+ struct work_struct req;
+ u64 req_addr;
+ int req_type;
+ unsigned req_seqno;
+ unsigned msg_seqno;
+ bool busy;
+ struct efx_buffer buf;
+ unsigned buftbl_base;
+ bool rx_filtering;
+ enum efx_filter_flags rx_filter_flags;
+ unsigned rx_filter_qid;
+ int rx_filter_id;
+ enum efx_vf_tx_filter_mode tx_filter_mode;
+ int tx_filter_id;
+ struct vfdi_endpoint addr;
+ u64 status_addr;
+ struct mutex status_lock;
+ u64 *peer_page_addrs;
+ unsigned peer_page_count;
+ u64 evq0_addrs[EFX_MAX_VF_EVQ_SIZE * sizeof(efx_qword_t) /
+ EFX_BUF_SIZE];
+ unsigned evq0_count;
+ wait_queue_head_t flush_waitq;
+ struct mutex txq_lock;
+ unsigned long txq_mask[VI_MASK_LENGTH];
+ unsigned txq_count;
+ unsigned long rxq_mask[VI_MASK_LENGTH];
+ unsigned rxq_count;
+ unsigned long rxq_retry_mask[VI_MASK_LENGTH];
+ atomic_t rxq_retry_count;
+ struct work_struct reset_work;
+};
+
+struct efx_memcpy_req {
+ unsigned int from_rid;
+ void *from_buf;
+ u64 from_addr;
+ unsigned int to_rid;
+ u64 to_addr;
+ unsigned length;
+};
+
+/**
+ * struct efx_local_addr - A MAC address on the vswitch without a VF.
+ *
+ * Siena does not have a switch, so VFs can't transmit data to each
+ * other. Instead the VFs must be made aware of the local addresses
+ * on the vswitch, so that they can arrange for an alternative
+ * software datapath to be used.
+ *
+ * @link: List head for insertion into efx->local_addr_list.
+ * @addr: Ethernet address
+ */
+struct efx_local_addr {
+ struct list_head link;
+ u8 addr[ETH_ALEN];
+};
+
+/**
+ * struct efx_endpoint_page - Page of vfdi_endpoint structures
+ *
+ * @link: List head for insertion into efx->local_page_list.
+ * @ptr: Pointer to page.
+ * @addr: DMA address of page.
+ */
+struct efx_endpoint_page {
+ struct list_head link;
+ void *ptr;
+ dma_addr_t addr;
+};
+
+/* Buffer table entries are reserved txq0,rxq0,evq0,txq1,rxq1,evq1 */
+#define EFX_BUFTBL_TXQ_BASE(_vf, _qid) \
+ ((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid))
+#define EFX_BUFTBL_RXQ_BASE(_vf, _qid) \
+ (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \
+ (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
+#define EFX_BUFTBL_EVQ_BASE(_vf, _qid) \
+ (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \
+ (2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
+
+#define EFX_FIELD_MASK(_field) \
+ ((1 << _field ## _WIDTH) - 1)
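The EFX_BUFTBL_*_BASE macros above carve each VF's reserved buffer-table range into per-VI regions in the order given by the comment (txq, then rxq, then evq). The sketch below mirrors that arithmetic in user space; every numeric constant here is an assumption for illustration (4 KB buffer-table pages, 8-byte descriptors, 4096-entry DMA rings, 2048-entry VF event rings), not a value taken from the driver headers:

#include <stdio.h>

#define BUF_SIZE        4096u   /* assumed stand-in for EFX_BUF_SIZE */
#define MAX_DMAQ_SIZE   4096u   /* assumed stand-in for EFX_MAX_DMAQ_SIZE */
#define MAX_VF_EVQ_SIZE 2048u   /* assumed stand-in for EFX_MAX_VF_EVQ_SIZE */
#define QWORD_SIZE      8u      /* sizeof(efx_qword_t) */

/* One VI needs room for a TX ring, an RX ring and an event ring. */
#define BUFTBL_PER_VI \
        ((2 * MAX_DMAQ_SIZE + MAX_VF_EVQ_SIZE) * QWORD_SIZE / BUF_SIZE)

static unsigned txq_base(unsigned vf_base, unsigned qid)
{
        return vf_base + BUFTBL_PER_VI * qid;
}

static unsigned rxq_base(unsigned vf_base, unsigned qid)
{
        return txq_base(vf_base, qid) + MAX_DMAQ_SIZE * QWORD_SIZE / BUF_SIZE;
}

static unsigned evq_base(unsigned vf_base, unsigned qid)
{
        return txq_base(vf_base, qid) +
               2 * MAX_DMAQ_SIZE * QWORD_SIZE / BUF_SIZE;
}

int main(void)
{
        unsigned vf_base = 1024;        /* arbitrary example base index */

        printf("qid 0: tx %u rx %u ev %u\n",
               txq_base(vf_base, 0), rxq_base(vf_base, 0), evq_base(vf_base, 0));
        printf("qid 1: tx %u rx %u ev %u\n",
               txq_base(vf_base, 1), rxq_base(vf_base, 1), evq_base(vf_base, 1));
        return 0;
}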
+
+/* VFs can only use this many transmit channels */
+static unsigned int vf_max_tx_channels = 2;
+module_param(vf_max_tx_channels, uint, 0444);
+MODULE_PARM_DESC(vf_max_tx_channels,
+ "Limit the number of TX channels VFs can use");
+
+static int max_vfs = -1;
+module_param(max_vfs, int, 0444);
+MODULE_PARM_DESC(max_vfs,
+ "Reduce the number of VFs initialized by the driver");
+
+/* Workqueue used by VFDI communication. We can't use the global
+ * workqueue because it may be running the VF driver's probe()
+ * routine, which will be blocked there waiting for a VFDI response.
+ */
+static struct workqueue_struct *vfdi_workqueue;
+
+static unsigned abs_index(struct efx_vf *vf, unsigned index)
+{
+ return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
+}
+
+static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
+ unsigned *vi_scale_out, unsigned *vf_total_out)
+{
+ u8 inbuf[MC_CMD_SRIOV_IN_LEN];
+ u8 outbuf[MC_CMD_SRIOV_OUT_LEN];
+ unsigned vi_scale, vf_total;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, SRIOV_IN_ENABLE, enable ? 1 : 0);
+ MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
+ MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
+ outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_SRIOV_OUT_LEN)
+ return -EIO;
+
+ vf_total = MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL);
+ vi_scale = MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE);
+ if (vi_scale > EFX_VI_SCALE_MAX)
+ return -EOPNOTSUPP;
+
+ if (vi_scale_out)
+ *vi_scale_out = vi_scale;
+ if (vf_total_out)
+ *vf_total_out = vf_total;
+
+ return 0;
+}
+
+static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
+{
+ efx_oword_t reg;
+
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_CZ_USREV_DIS, enabled ? 0 : 1,
+ FRF_CZ_DFLT_EVQ, efx->vfdi_channel->channel);
+ efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG);
+}
+
+static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
+ unsigned int count)
+{
+ u8 *inbuf, *record;
+ unsigned int used;
+ u32 from_rid, from_hi, from_lo;
+ int rc;
+
+ mb(); /* Finish writing source/reading dest before DMA starts */
+
+ used = MC_CMD_MEMCPY_IN_LEN(count);
+ if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX))
+ return -ENOBUFS;
+
+ /* Allocate room for the largest request */
+ inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX, GFP_KERNEL);
+ if (inbuf == NULL)
+ return -ENOMEM;
+
+ record = inbuf;
+ MCDI_SET_DWORD(record, MEMCPY_IN_RECORD, count);
+ while (count-- > 0) {
+ MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
+ req->to_rid);
+ MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO,
+ (u32)req->to_addr);
+ MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI,
+ (u32)(req->to_addr >> 32));
+ if (req->from_buf == NULL) {
+ from_rid = req->from_rid;
+ from_lo = (u32)req->from_addr;
+ from_hi = (u32)(req->from_addr >> 32);
+ } else {
+ if (WARN_ON(used + req->length > MCDI_CTL_SDU_LEN_MAX)) {
+ rc = -ENOBUFS;
+ goto out;
+ }
+
+ from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
+ from_lo = used;
+ from_hi = 0;
+ memcpy(inbuf + used, req->from_buf, req->length);
+ used += req->length;
+ }
+
+ MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid);
+ MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO,
+ from_lo);
+ MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI,
+ from_hi);
+ MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
+ req->length);
+
+ ++req;
+ record += MC_CMD_MEMCPY_IN_RECORD_LEN;
+ }
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
+out:
+ kfree(inbuf);
+
+ mb(); /* Don't write source/read dest before DMA is complete */
+
+ return rc;
+}
+
+/* The TX filter is entirely controlled by this driver, and is modified
+ * underneath the feet of the VF
+ */
+static void efx_sriov_reset_tx_filter(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct efx_filter_spec filter;
+ u16 vlan;
+ int rc;
+
+ if (vf->tx_filter_id != -1) {
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ vf->tx_filter_id);
+ netif_dbg(efx, hw, efx->net_dev, "Removed vf %s tx filter %d\n",
+ vf->pci_name, vf->tx_filter_id);
+ vf->tx_filter_id = -1;
+ }
+
+ if (is_zero_ether_addr(vf->addr.mac_addr))
+ return;
+
+ /* Turn on TX filtering automatically if not explicitly
+ * enabled or disabled.
+ */
+ if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2)
+ vf->tx_filter_mode = VF_TX_FILTER_ON;
+
+ vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
+ efx_filter_init_tx(&filter, abs_index(vf, 0));
+ rc = efx_filter_set_eth_local(&filter,
+ vlan ? vlan : EFX_FILTER_VID_UNSPEC,
+ vf->addr.mac_addr);
+ BUG_ON(rc);
+
+ rc = efx_filter_insert_filter(efx, &filter, true);
+ if (rc < 0) {
+ netif_warn(efx, hw, efx->net_dev,
+ "Unable to migrate tx filter for vf %s\n",
+ vf->pci_name);
+ } else {
+ netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s tx filter %d\n",
+ vf->pci_name, rc);
+ vf->tx_filter_id = rc;
+ }
+}
+
+/* The RX filter is managed here on behalf of the VF driver */
+static void efx_sriov_reset_rx_filter(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct efx_filter_spec filter;
+ u16 vlan;
+ int rc;
+
+ if (vf->rx_filter_id != -1) {
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ vf->rx_filter_id);
+ netif_dbg(efx, hw, efx->net_dev, "Removed vf %s rx filter %d\n",
+ vf->pci_name, vf->rx_filter_id);
+ vf->rx_filter_id = -1;
+ }
+
+ if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr))
+ return;
+
+ vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
+ efx_filter_init_rx(&filter, EFX_FILTER_PRI_REQUIRED,
+ vf->rx_filter_flags,
+ abs_index(vf, vf->rx_filter_qid));
+ rc = efx_filter_set_eth_local(&filter,
+ vlan ? vlan : EFX_FILTER_VID_UNSPEC,
+ vf->addr.mac_addr);
+ BUG_ON(rc);
+
+ rc = efx_filter_insert_filter(efx, &filter, true);
+ if (rc < 0) {
+ netif_warn(efx, hw, efx->net_dev,
+ "Unable to insert rx filter for vf %s\n",
+ vf->pci_name);
+ } else {
+ netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s rx filter %d\n",
+ vf->pci_name, rc);
+ vf->rx_filter_id = rc;
+ }
+}
+
+static void __efx_sriov_update_vf_addr(struct efx_vf *vf)
+{
+ efx_sriov_reset_tx_filter(vf);
+ efx_sriov_reset_rx_filter(vf);
+ queue_work(vfdi_workqueue, &vf->efx->peer_work);
+}
+
+/* Push the peer list to this VF. The caller must hold status_lock to interlock
+ * with VFDI requests, and they must be serialised against manipulation of
+ * local_page_list, either by acquiring local_lock or by running from
+ * efx_sriov_peer_work()
+ */
+static void __efx_sriov_push_vf_status(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct vfdi_status *status = efx->vfdi_status.addr;
+ struct efx_memcpy_req copy[4];
+ struct efx_endpoint_page *epp;
+ unsigned int pos, count;
+ unsigned data_offset;
+ efx_qword_t event;
+
+ WARN_ON(!mutex_is_locked(&vf->status_lock));
+ WARN_ON(!vf->status_addr);
+
+ status->local = vf->addr;
+ status->generation_end = ++status->generation_start;
+
+ memset(copy, '\0', sizeof(copy));
+ /* Write generation_start */
+ copy[0].from_buf = &status->generation_start;
+ copy[0].to_rid = vf->pci_rid;
+ copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status,
+ generation_start);
+ copy[0].length = sizeof(status->generation_start);
+ /* DMA the rest of the structure (excluding the generations). This
+ * assumes that the non-generation portion of vfdi_status is in
+ * one chunk starting at the version member.
+ */
+ data_offset = offsetof(struct vfdi_status, version);
+ copy[1].from_rid = efx->pci_dev->devfn;
+ copy[1].from_addr = efx->vfdi_status.dma_addr + data_offset;
+ copy[1].to_rid = vf->pci_rid;
+ copy[1].to_addr = vf->status_addr + data_offset;
+ copy[1].length = status->length - data_offset;
+
+ /* Copy the peer pages */
+ pos = 2;
+ count = 0;
+ list_for_each_entry(epp, &efx->local_page_list, link) {
+ if (count == vf->peer_page_count) {
+ /* The VF driver will know they need to provide more
+ * pages because peer_addr_count is too large.
+ */
+ break;
+ }
+ copy[pos].from_buf = NULL;
+ copy[pos].from_rid = efx->pci_dev->devfn;
+ copy[pos].from_addr = epp->addr;
+ copy[pos].to_rid = vf->pci_rid;
+ copy[pos].to_addr = vf->peer_page_addrs[count];
+ copy[pos].length = EFX_PAGE_SIZE;
+
+ if (++pos == ARRAY_SIZE(copy)) {
+ efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
+ pos = 0;
+ }
+ ++count;
+ }
+
+ /* Write generation_end */
+ copy[pos].from_buf = &status->generation_end;
+ copy[pos].to_rid = vf->pci_rid;
+ copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status,
+ generation_end);
+ copy[pos].length = sizeof(status->generation_end);
+ efx_sriov_memcpy(efx, copy, pos + 1);
+
+ /* Notify the guest */
+ EFX_POPULATE_QWORD_3(event,
+ FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
+ VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
+ VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
+ ++vf->msg_seqno;
+ efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx),
+ &event);
+}
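The generation_start/generation_end pair written above acts as a torn-read guard: generation_start is DMAed to the VF first and generation_end last, so a consumer that sees the two values differ knows the page was being rewritten underneath it and must retry, much like a seqlock reader. A hypothetical consumer-side check (a simplified model; the real layout is struct vfdi_status, the real consumer is the VF driver, and a real implementation would also need memory barriers):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct status_model {
        uint32_t generation_start;      /* written first by the producer */
        uint8_t body[256];              /* the data of interest */
        uint32_t generation_end;        /* written last by the producer */
};

/* Returns true when the copy in *snapshot is internally consistent. */
static bool snapshot_status(const struct status_model *shared,
                            struct status_model *snapshot)
{
        memcpy(snapshot, shared, sizeof(*snapshot));
        return snapshot->generation_start == snapshot->generation_end;
}

int main(void)
{
        static struct status_model shared = { 1, { 0 }, 1 };
        struct status_model snap;

        while (!snapshot_status(&shared, &snap))
                ;       /* retry until the producer has finished an update */
        return 0;
}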
+
+static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
+ u64 *addr, unsigned count)
+{
+ efx_qword_t buf;
+ unsigned pos;
+
+ for (pos = 0; pos < count; ++pos) {
+ EFX_POPULATE_QWORD_3(buf,
+ FRF_AZ_BUF_ADR_REGION, 0,
+ FRF_AZ_BUF_ADR_FBUF,
+ addr ? addr[pos] >> 12 : 0,
+ FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+ efx_sram_writeq(efx, efx->membase + FR_BZ_BUF_FULL_TBL,
+ &buf, offset + pos);
+ }
+}
+
+static bool bad_vf_index(struct efx_nic *efx, unsigned index)
+{
+ return index >= efx_vf_size(efx);
+}
+
+static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
+{
+ unsigned max_buf_count = max_entry_count *
+ sizeof(efx_qword_t) / EFX_BUF_SIZE;
+
+ return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count);
+}
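bad_buf_count() above accepts only a count that is a power of two and no larger than the number of buffer-table pages the largest permitted ring can occupy. A stand-alone illustration of the same check (the 8-byte entry size and 4 KB buffer size are assumed for the example):

#include <assert.h>
#include <stdbool.h>

static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
{
        unsigned max_buf_count = max_entry_count * 8u / 4096u;

        return (buf_count & (buf_count - 1)) || buf_count > max_buf_count;
}

int main(void)
{
        assert(!bad_buf_count(4, 4096));        /* power of two, within range */
        assert(bad_buf_count(3, 4096));         /* not a power of two */
        assert(bad_buf_count(16, 4096));        /* exceeds an 8-buffer ring */
        return 0;
}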
+
+/* Check that VI specified by per-port index belongs to a VF.
+ * Optionally set VF index and VI index within the VF.
+ */
+static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
+ struct efx_vf **vf_out, unsigned *rel_index_out)
+{
+ unsigned vf_i;
+
+ if (abs_index < EFX_VI_BASE)
+ return true;
+ vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx);
+ if (vf_i >= efx->vf_init_count)
+ return true;
+
+ if (vf_out)
+ *vf_out = efx->vf + vf_i;
+ if (rel_index_out)
+ *rel_index_out = abs_index % efx_vf_size(efx);
+ return false;
+}
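map_vi_index() above turns an absolute VI number into a (VF, VI-within-VF) pair; taking the remainder of the absolute index works because EFX_VI_BASE is aligned to the per-VF VI count, as the BUILD_BUG_ON in efx_sriov_init() later in this file checks. A user-space model with assumed values for EFX_VI_BASE, efx_vf_size() and vf_init_count:

#include <stdbool.h>
#include <stdio.h>

#define VI_BASE         128u    /* assumed stand-in for EFX_VI_BASE */
#define VF_SIZE         4u      /* assumed stand-in for efx_vf_size(efx) */
#define VF_INIT_COUNT   8u      /* assumed stand-in for efx->vf_init_count */

/* Returns true if abs_index does not belong to any VF. */
static bool map_vi_index(unsigned abs_index, unsigned *vf_out, unsigned *rel_out)
{
        unsigned vf_i;

        if (abs_index < VI_BASE)
                return true;
        vf_i = (abs_index - VI_BASE) / VF_SIZE;
        if (vf_i >= VF_INIT_COUNT)
                return true;

        *vf_out = vf_i;
        *rel_out = abs_index % VF_SIZE; /* valid because VI_BASE % VF_SIZE == 0 */
        return false;
}

int main(void)
{
        unsigned vf, rel;

        if (!map_vi_index(133, &vf, &rel))
                printf("VI 133 -> VF %u, relative VI %u\n", vf, rel);
        return 0;
}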
+
+static int efx_vfdi_init_evq(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct vfdi_req *req = vf->buf.addr;
+ unsigned vf_evq = req->u.init_evq.index;
+ unsigned buf_count = req->u.init_evq.buf_count;
+ unsigned abs_evq = abs_index(vf, vf_evq);
+ unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq);
+ efx_oword_t reg;
+
+ if (bad_vf_index(efx, vf_evq) ||
+ bad_buf_count(buf_count, EFX_MAX_VF_EVQ_SIZE)) {
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n",
+ vf->pci_name, vf_evq, buf_count);
+ return VFDI_RC_EINVAL;
+ }
+
+ efx_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);
+
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_CZ_TIMER_Q_EN, 1,
+ FRF_CZ_HOST_NOTIFY_MODE, 0,
+ FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AZ_EVQ_EN, 1,
+ FRF_AZ_EVQ_SIZE, __ffs(buf_count),
+ FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
+ efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
+
+ if (vf_evq == 0) {
+ memcpy(vf->evq0_addrs, req->u.init_evq.addr,
+ buf_count * sizeof(u64));
+ vf->evq0_count = buf_count;
+ }
+
+ return VFDI_RC_SUCCESS;
+}
+
+static int efx_vfdi_init_rxq(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct vfdi_req *req = vf->buf.addr;
+ unsigned vf_rxq = req->u.init_rxq.index;
+ unsigned vf_evq = req->u.init_rxq.evq;
+ unsigned buf_count = req->u.init_rxq.buf_count;
+ unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq);
+ unsigned label;
+ efx_oword_t reg;
+
+ if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
+ bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d "
+ "buf_count %d\n", vf->pci_name, vf_rxq,
+ vf_evq, buf_count);
+ return VFDI_RC_EINVAL;
+ }
+ if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask))
+ ++vf->rxq_count;
+ efx_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);
+
+ label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL);
+ EFX_POPULATE_OWORD_6(reg,
+ FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl,
+ FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
+ FRF_AZ_RX_DESCQ_LABEL, label,
+ FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count),
+ FRF_AZ_RX_DESCQ_JUMBO,
+ !!(req->u.init_rxq.flags &
+ VFDI_RXQ_FLAG_SCATTER_EN),
+ FRF_AZ_RX_DESCQ_EN, 1);
+ efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
+ abs_index(vf, vf_rxq));
+
+ return VFDI_RC_SUCCESS;
+}
+
+static int efx_vfdi_init_txq(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct vfdi_req *req = vf->buf.addr;
+ unsigned vf_txq = req->u.init_txq.index;
+ unsigned vf_evq = req->u.init_txq.evq;
+ unsigned buf_count = req->u.init_txq.buf_count;
+ unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq);
+ unsigned label, eth_filt_en;
+ efx_oword_t reg;
+
+ if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) ||
+ vf_txq >= vf_max_tx_channels ||
+ bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "ERROR: Invalid INIT_TXQ from %s: txq %d evq %d "
+ "buf_count %d\n", vf->pci_name, vf_txq,
+ vf_evq, buf_count);
+ return VFDI_RC_EINVAL;
+ }
+
+ mutex_lock(&vf->txq_lock);
+ if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask))
+ ++vf->txq_count;
+ mutex_unlock(&vf->txq_lock);
+ efx_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);
+
+ eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON;
+
+ label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL);
+ EFX_POPULATE_OWORD_8(reg,
+ FRF_CZ_TX_DPT_Q_MASK_WIDTH, min(efx->vi_scale, 1U),
+ FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en,
+ FRF_AZ_TX_DESCQ_EN, 1,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl,
+ FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
+ FRF_AZ_TX_DESCQ_LABEL, label,
+ FRF_AZ_TX_DESCQ_SIZE, __ffs(buf_count),
+ FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+ efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
+ abs_index(vf, vf_txq));
+
+ return VFDI_RC_SUCCESS;
+}
+
+/* Returns true when efx_vfdi_fini_all_queues should wake */
+static bool efx_vfdi_flush_wake(struct efx_vf *vf)
+{
+ /* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
+ smp_mb();
+
+ return (!vf->txq_count && !vf->rxq_count) ||
+ atomic_read(&vf->rxq_retry_count);
+}
+
+static void efx_vfdi_flush_clear(struct efx_vf *vf)
+{
+ memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
+ vf->txq_count = 0;
+ memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask));
+ vf->rxq_count = 0;
+ memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask));
+ atomic_set(&vf->rxq_retry_count, 0);
+}
+
+static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ efx_oword_t reg;
+ unsigned count = efx_vf_size(efx);
+ unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
+ unsigned timeout = HZ;
+ unsigned index, rxqs_count;
+ __le32 *rxqs;
+ int rc;
+
+ rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
+ if (rxqs == NULL)
+ return VFDI_RC_ENOMEM;
+
+ rtnl_lock();
+ if (efx->fc_disable++ == 0)
+ efx_mcdi_set_mac(efx);
+ rtnl_unlock();
+
+ /* Flush all the initialized queues */
+ rxqs_count = 0;
+ for (index = 0; index < count; ++index) {
+ if (test_bit(index, vf->txq_mask)) {
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ,
+ vf_offset + index);
+ efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
+ }
+ if (test_bit(index, vf->rxq_mask))
+ rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index);
+ }
+
+ atomic_set(&vf->rxq_retry_count, 0);
+ while (timeout && (vf->rxq_count || vf->txq_count)) {
+ rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)rxqs,
+ rxqs_count * sizeof(*rxqs), NULL, 0, NULL);
+ WARN_ON(rc < 0);
+
+ timeout = wait_event_timeout(vf->flush_waitq,
+ efx_vfdi_flush_wake(vf),
+ timeout);
+ rxqs_count = 0;
+ for (index = 0; index < count; ++index) {
+ if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
+ atomic_dec(&vf->rxq_retry_count);
+ rxqs[rxqs_count++] =
+ cpu_to_le32(vf_offset + index);
+ }
+ }
+ }
+
+ rtnl_lock();
+ if (--efx->fc_disable == 0)
+ efx_mcdi_set_mac(efx);
+ rtnl_unlock();
+
+ /* Irrespective of success/failure, fini the queues */
+ EFX_ZERO_OWORD(reg);
+ for (index = 0; index < count; ++index) {
+ efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
+ vf_offset + index);
+ efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
+ vf_offset + index);
+ efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL,
+ vf_offset + index);
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL,
+ vf_offset + index);
+ }
+ efx_sriov_bufs(efx, vf->buftbl_base, NULL,
+ EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
+ kfree(rxqs);
+ efx_vfdi_flush_clear(vf);
+
+ vf->evq0_count = 0;
+
+ return timeout ? 0 : VFDI_RC_ETIMEDOUT;
+}
+
+static int efx_vfdi_insert_filter(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct vfdi_req *req = vf->buf.addr;
+ unsigned vf_rxq = req->u.mac_filter.rxq;
+ unsigned flags;
+
+ if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) {
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "ERROR: Invalid INSERT_FILTER from %s: rxq %d "
+ "flags 0x%x\n", vf->pci_name, vf_rxq,
+ req->u.mac_filter.flags);
+ return VFDI_RC_EINVAL;
+ }
+
+ flags = 0;
+ if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS)
+ flags |= EFX_FILTER_FLAG_RX_RSS;
+ if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER)
+ flags |= EFX_FILTER_FLAG_RX_SCATTER;
+ vf->rx_filter_flags = flags;
+ vf->rx_filter_qid = vf_rxq;
+ vf->rx_filtering = true;
+
+ efx_sriov_reset_rx_filter(vf);
+ queue_work(vfdi_workqueue, &efx->peer_work);
+
+ return VFDI_RC_SUCCESS;
+}
+
+static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
+{
+ vf->rx_filtering = false;
+ efx_sriov_reset_rx_filter(vf);
+ queue_work(vfdi_workqueue, &vf->efx->peer_work);
+
+ return VFDI_RC_SUCCESS;
+}
+
+static int efx_vfdi_set_status_page(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct vfdi_req *req = vf->buf.addr;
+ u64 page_count = req->u.set_status_page.peer_page_count;
+ u64 max_page_count =
+ (EFX_PAGE_SIZE -
+ offsetof(struct vfdi_req, u.set_status_page.peer_page_addr[0]))
+ / sizeof(req->u.set_status_page.peer_page_addr[0]);
+
+ if (!req->u.set_status_page.dma_addr || page_count > max_page_count) {
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "ERROR: Invalid SET_STATUS_PAGE from %s\n",
+ vf->pci_name);
+ return VFDI_RC_EINVAL;
+ }
+
+ mutex_lock(&efx->local_lock);
+ mutex_lock(&vf->status_lock);
+ vf->status_addr = req->u.set_status_page.dma_addr;
+
+ kfree(vf->peer_page_addrs);
+ vf->peer_page_addrs = NULL;
+ vf->peer_page_count = 0;
+
+ if (page_count) {
+ vf->peer_page_addrs = kcalloc(page_count, sizeof(u64),
+ GFP_KERNEL);
+ if (vf->peer_page_addrs) {
+ memcpy(vf->peer_page_addrs,
+ req->u.set_status_page.peer_page_addr,
+ page_count * sizeof(u64));
+ vf->peer_page_count = page_count;
+ }
+ }
+
+ __efx_sriov_push_vf_status(vf);
+ mutex_unlock(&vf->status_lock);
+ mutex_unlock(&efx->local_lock);
+
+ return VFDI_RC_SUCCESS;
+}
+
+static int efx_vfdi_clear_status_page(struct efx_vf *vf)
+{
+ mutex_lock(&vf->status_lock);
+ vf->status_addr = 0;
+ mutex_unlock(&vf->status_lock);
+
+ return VFDI_RC_SUCCESS;
+}
+
+typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);
+
+static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
+ [VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
+ [VFDI_OP_INIT_TXQ] = efx_vfdi_init_txq,
+ [VFDI_OP_INIT_RXQ] = efx_vfdi_init_rxq,
+ [VFDI_OP_FINI_ALL_QUEUES] = efx_vfdi_fini_all_queues,
+ [VFDI_OP_INSERT_FILTER] = efx_vfdi_insert_filter,
+ [VFDI_OP_REMOVE_ALL_FILTERS] = efx_vfdi_remove_all_filters,
+ [VFDI_OP_SET_STATUS_PAGE] = efx_vfdi_set_status_page,
+ [VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page,
+};
+
+static void efx_sriov_vfdi(struct work_struct *work)
+{
+ struct efx_vf *vf = container_of(work, struct efx_vf, req);
+ struct efx_nic *efx = vf->efx;
+ struct vfdi_req *req = vf->buf.addr;
+ struct efx_memcpy_req copy[2];
+ int rc;
+
+ /* Copy this page into the local address space */
+ memset(copy, '\0', sizeof(copy));
+ copy[0].from_rid = vf->pci_rid;
+ copy[0].from_addr = vf->req_addr;
+ copy[0].to_rid = efx->pci_dev->devfn;
+ copy[0].to_addr = vf->buf.dma_addr;
+ copy[0].length = EFX_PAGE_SIZE;
+ rc = efx_sriov_memcpy(efx, copy, 1);
+ if (rc) {
+ /* If we can't get the request, we can't reply to the caller */
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "ERROR: Unable to fetch VFDI request from %s rc %d\n",
+ vf->pci_name, -rc);
+ vf->busy = false;
+ return;
+ }
+
+ if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) {
+ rc = vfdi_ops[req->op](vf);
+ if (rc == 0) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "vfdi request %d from %s ok\n",
+ req->op, vf->pci_name);
+ }
+ } else {
+ netif_dbg(efx, hw, efx->net_dev,
+ "ERROR: Unrecognised request %d from VF %s addr "
+ "%llx\n", req->op, vf->pci_name,
+ (unsigned long long)vf->req_addr);
+ rc = VFDI_RC_EOPNOTSUPP;
+ }
+
+ /* Allow subsequent VF requests */
+ vf->busy = false;
+ smp_wmb();
+
+ /* Respond to the request */
+ req->rc = rc;
+ req->op = VFDI_OP_RESPONSE;
+
+ memset(copy, '\0', sizeof(copy));
+ copy[0].from_buf = &req->rc;
+ copy[0].to_rid = vf->pci_rid;
+ copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc);
+ copy[0].length = sizeof(req->rc);
+ copy[1].from_buf = &req->op;
+ copy[1].to_rid = vf->pci_rid;
+ copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op);
+ copy[1].length = sizeof(req->op);
+
+ (void) efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
+}
+
+
+
+/* After a reset the event queues inside the guests no longer exist. Fill the
+ * event ring in guest memory with VFDI reset events, then re-initialise the
+ * event queue to raise an interrupt. The guest driver will then recover.
+ */
+static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
+{
+ struct efx_nic *efx = vf->efx;
+ struct efx_memcpy_req copy_req[4];
+ efx_qword_t event;
+ unsigned int pos, count, k, buftbl, abs_evq;
+ efx_oword_t reg;
+ efx_dword_t ptr;
+ int rc;
+
+ BUG_ON(buffer->len != EFX_PAGE_SIZE);
+
+ if (!vf->evq0_count)
+ return;
+ BUG_ON(vf->evq0_count & (vf->evq0_count - 1));
+
+ mutex_lock(&vf->status_lock);
+ EFX_POPULATE_QWORD_3(event,
+ FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
+ VFDI_EV_SEQ, vf->msg_seqno,
+ VFDI_EV_TYPE, VFDI_EV_TYPE_RESET);
+ vf->msg_seqno++;
+ for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event))
+ memcpy(buffer->addr + pos, &event, sizeof(event));
+
+ for (pos = 0; pos < vf->evq0_count; pos += count) {
+ count = min_t(unsigned, vf->evq0_count - pos,
+ ARRAY_SIZE(copy_req));
+ for (k = 0; k < count; k++) {
+ copy_req[k].from_buf = NULL;
+ copy_req[k].from_rid = efx->pci_dev->devfn;
+ copy_req[k].from_addr = buffer->dma_addr;
+ copy_req[k].to_rid = vf->pci_rid;
+ copy_req[k].to_addr = vf->evq0_addrs[pos + k];
+ copy_req[k].length = EFX_PAGE_SIZE;
+ }
+ rc = efx_sriov_memcpy(efx, copy_req, count);
+ if (rc) {
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "ERROR: Unable to notify %s of reset"
+ ": %d\n", vf->pci_name, -rc);
+ break;
+ }
+ }
+
+ /* Reinitialise, arm and trigger evq0 */
+ abs_evq = abs_index(vf, 0);
+ buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0);
+ efx_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);
+
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_CZ_TIMER_Q_EN, 1,
+ FRF_CZ_HOST_NOTIFY_MODE, 0,
+ FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AZ_EVQ_EN, 1,
+ FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count),
+ FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
+ efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
+ EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0);
+ efx_writed_table(efx, &ptr, FR_BZ_EVQ_RPTR, abs_evq);
+
+ mutex_unlock(&vf->status_lock);
+}
+
+static void efx_sriov_reset_vf_work(struct work_struct *work)
+{
+ struct efx_vf *vf = container_of(work, struct efx_vf, req);
+ struct efx_nic *efx = vf->efx;
+ struct efx_buffer buf;
+
+ if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) {
+ efx_sriov_reset_vf(vf, &buf);
+ efx_nic_free_buffer(efx, &buf);
+ }
+}
+
+static void efx_sriov_handle_no_channel(struct efx_nic *efx)
+{
+ netif_err(efx, drv, efx->net_dev,
+ "ERROR: IOV requires MSI-X and 1 additional interrupt "
+ "vector. IOV disabled\n");
+ efx->vf_count = 0;
+}
+
+static int efx_sriov_probe_channel(struct efx_channel *channel)
+{
+ channel->efx->vfdi_channel = channel;
+ return 0;
+}
+
+static void
+efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
+{
+ snprintf(buf, len, "%s-iov", channel->efx->name);
+}
+
+static const struct efx_channel_type efx_sriov_channel_type = {
+ .handle_no_channel = efx_sriov_handle_no_channel,
+ .pre_probe = efx_sriov_probe_channel,
+ .get_name = efx_sriov_get_channel_name,
+ /* no copy operation; channel must not be reallocated */
+ .keep_eventq = true,
+};
+
+void efx_sriov_probe(struct efx_nic *efx)
+{
+ unsigned count;
+
+ if (!max_vfs)
+ return;
+
+ if (efx_sriov_cmd(efx, false, &efx->vi_scale, &count))
+ return;
+ if (count > 0 && count > max_vfs)
+ count = max_vfs;
+
+ /* efx_nic_dimension_resources() will reduce vf_count as appropriate */
+ efx->vf_count = count;
+
+ efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_sriov_channel_type;
+}
+
+/* Copy the list of individual addresses into the vfdi_status.peers
+ * array and auxiliary pages, protected by %local_lock. Drop that lock
+ * and then broadcast the address list to every VF.
+ */
+static void efx_sriov_peer_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, peer_work);
+ struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
+ struct efx_vf *vf;
+ struct efx_local_addr *local_addr;
+ struct vfdi_endpoint *peer;
+ struct efx_endpoint_page *epp;
+ struct list_head pages;
+ unsigned int peer_space;
+ unsigned int peer_count;
+ unsigned int pos;
+
+ mutex_lock(&efx->local_lock);
+
+ /* Move the existing peer pages off %local_page_list */
+ INIT_LIST_HEAD(&pages);
+ list_splice_tail_init(&efx->local_page_list, &pages);
+
+ /* Populate the VF addresses starting from entry 1 (entry 0 is
+ * the PF address)
+ */
+ peer = vfdi_status->peers + 1;
+ peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
+ peer_count = 1;
+ for (pos = 0; pos < efx->vf_count; ++pos) {
+ vf = efx->vf + pos;
+
+ mutex_lock(&vf->status_lock);
+ if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
+ *peer++ = vf->addr;
+ ++peer_count;
+ --peer_space;
+ BUG_ON(peer_space == 0);
+ }
+ mutex_unlock(&vf->status_lock);
+ }
+
+ /* Fill the remaining addresses */
+ list_for_each_entry(local_addr, &efx->local_addr_list, link) {
+ memcpy(peer->mac_addr, local_addr->addr, ETH_ALEN);
+ peer->tci = 0;
+ ++peer;
+ ++peer_count;
+ if (--peer_space == 0) {
+ if (list_empty(&pages)) {
+ epp = kmalloc(sizeof(*epp), GFP_KERNEL);
+ if (!epp)
+ break;
+ epp->ptr = dma_alloc_coherent(
+ &efx->pci_dev->dev, EFX_PAGE_SIZE,
+ &epp->addr, GFP_KERNEL);
+ if (!epp->ptr) {
+ kfree(epp);
+ break;
+ }
+ } else {
+ epp = list_first_entry(
+ &pages, struct efx_endpoint_page, link);
+ list_del(&epp->link);
+ }
+
+ list_add_tail(&epp->link, &efx->local_page_list);
+ peer = (struct vfdi_endpoint *)epp->ptr;
+ peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint);
+ }
+ }
+ vfdi_status->peer_count = peer_count;
+ mutex_unlock(&efx->local_lock);
+
+ /* Free any now unused endpoint pages */
+ while (!list_empty(&pages)) {
+ epp = list_first_entry(
+ &pages, struct efx_endpoint_page, link);
+ list_del(&epp->link);
+ dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
+ epp->ptr, epp->addr);
+ kfree(epp);
+ }
+
+ /* Finally, push the pages */
+ for (pos = 0; pos < efx->vf_count; ++pos) {
+ vf = efx->vf + pos;
+
+ mutex_lock(&vf->status_lock);
+ if (vf->status_addr)
+ __efx_sriov_push_vf_status(vf);
+ mutex_unlock(&vf->status_lock);
+ }
+}
+
+static void efx_sriov_free_local(struct efx_nic *efx)
+{
+ struct efx_local_addr *local_addr;
+ struct efx_endpoint_page *epp;
+
+ while (!list_empty(&efx->local_addr_list)) {
+ local_addr = list_first_entry(&efx->local_addr_list,
+ struct efx_local_addr, link);
+ list_del(&local_addr->link);
+ kfree(local_addr);
+ }
+
+ while (!list_empty(&efx->local_page_list)) {
+ epp = list_first_entry(&efx->local_page_list,
+ struct efx_endpoint_page, link);
+ list_del(&epp->link);
+ dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
+ epp->ptr, epp->addr);
+ kfree(epp);
+ }
+}
+
+static int efx_sriov_vf_alloc(struct efx_nic *efx)
+{
+ unsigned index;
+ struct efx_vf *vf;
+
+ efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL);
+ if (!efx->vf)
+ return -ENOMEM;
+
+ for (index = 0; index < efx->vf_count; ++index) {
+ vf = efx->vf + index;
+
+ vf->efx = efx;
+ vf->index = index;
+ vf->rx_filter_id = -1;
+ vf->tx_filter_mode = VF_TX_FILTER_AUTO;
+ vf->tx_filter_id = -1;
+ INIT_WORK(&vf->req, efx_sriov_vfdi);
+ INIT_WORK(&vf->reset_work, efx_sriov_reset_vf_work);
+ init_waitqueue_head(&vf->flush_waitq);
+ mutex_init(&vf->status_lock);
+ mutex_init(&vf->txq_lock);
+ }
+
+ return 0;
+}
+
+static void efx_sriov_vfs_fini(struct efx_nic *efx)
+{
+ struct efx_vf *vf;
+ unsigned int pos;
+
+ for (pos = 0; pos < efx->vf_count; ++pos) {
+ vf = efx->vf + pos;
+
+ efx_nic_free_buffer(efx, &vf->buf);
+ kfree(vf->peer_page_addrs);
+ vf->peer_page_addrs = NULL;
+ vf->peer_page_count = 0;
+
+ vf->evq0_count = 0;
+ }
+}
+
+static int efx_sriov_vfs_init(struct efx_nic *efx)
+{
+ struct pci_dev *pci_dev = efx->pci_dev;
+ unsigned index, devfn, sriov, buftbl_base;
+ u16 offset, stride;
+ struct efx_vf *vf;
+ int rc;
+
+ sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!sriov)
+ return -ENOENT;
+
+ pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset);
+ pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride);
+
+ buftbl_base = efx->vf_buftbl_base;
+ devfn = pci_dev->devfn + offset;
+ for (index = 0; index < efx->vf_count; ++index) {
+ vf = efx->vf + index;
+
+ /* Reserve buffer entries */
+ vf->buftbl_base = buftbl_base;
+ buftbl_base += EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx);
+
+ vf->pci_rid = devfn;
+ snprintf(vf->pci_name, sizeof(vf->pci_name),
+ "%04x:%02x:%02x.%d",
+ pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+ rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE);
+ if (rc)
+ goto fail;
+
+ devfn += stride;
+ }
+
+ return 0;
+
+fail:
+ efx_sriov_vfs_fini(efx);
+ return rc;
+}
+
+int efx_sriov_init(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ struct vfdi_status *vfdi_status;
+ int rc;
+
+ /* Ensure there's room for vf_channel */
+ BUILD_BUG_ON(EFX_MAX_CHANNELS + 1 >= EFX_VI_BASE);
+ /* Ensure that VI_BASE is aligned on VI_SCALE */
+ BUILD_BUG_ON(EFX_VI_BASE & ((1 << EFX_VI_SCALE_MAX) - 1));
+
+ if (efx->vf_count == 0)
+ return 0;
+
+ rc = efx_sriov_cmd(efx, true, NULL, NULL);
+ if (rc)
+ goto fail_cmd;
+
+ rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status));
+ if (rc)
+ goto fail_status;
+ vfdi_status = efx->vfdi_status.addr;
+ memset(vfdi_status, 0, sizeof(*vfdi_status));
+ vfdi_status->version = 1;
+ vfdi_status->length = sizeof(*vfdi_status);
+ vfdi_status->max_tx_channels = vf_max_tx_channels;
+ vfdi_status->vi_scale = efx->vi_scale;
+ vfdi_status->rss_rxq_count = efx->rss_spread;
+ vfdi_status->peer_count = 1 + efx->vf_count;
+ vfdi_status->timer_quantum_ns = efx->timer_quantum_ns;
+
+ rc = efx_sriov_vf_alloc(efx);
+ if (rc)
+ goto fail_alloc;
+
+ mutex_init(&efx->local_lock);
+ INIT_WORK(&efx->peer_work, efx_sriov_peer_work);
+ INIT_LIST_HEAD(&efx->local_addr_list);
+ INIT_LIST_HEAD(&efx->local_page_list);
+
+ rc = efx_sriov_vfs_init(efx);
+ if (rc)
+ goto fail_vfs;
+
+ rtnl_lock();
+ memcpy(vfdi_status->peers[0].mac_addr,
+ net_dev->dev_addr, ETH_ALEN);
+ efx->vf_init_count = efx->vf_count;
+ rtnl_unlock();
+
+ efx_sriov_usrev(efx, true);
+
+ /* At this point we must be ready to accept VFDI requests */
+
+ rc = pci_enable_sriov(efx->pci_dev, efx->vf_count);
+ if (rc)
+ goto fail_pci;
+
+ netif_info(efx, probe, net_dev,
+ "enabled SR-IOV for %d VFs, %d VI per VF\n",
+ efx->vf_count, efx_vf_size(efx));
+ return 0;
+
+fail_pci:
+ efx_sriov_usrev(efx, false);
+ rtnl_lock();
+ efx->vf_init_count = 0;
+ rtnl_unlock();
+ efx_sriov_vfs_fini(efx);
+fail_vfs:
+ cancel_work_sync(&efx->peer_work);
+ efx_sriov_free_local(efx);
+ kfree(efx->vf);
+fail_alloc:
+ efx_nic_free_buffer(efx, &efx->vfdi_status);
+fail_status:
+ efx_sriov_cmd(efx, false, NULL, NULL);
+fail_cmd:
+ return rc;
+}
+
+void efx_sriov_fini(struct efx_nic *efx)
+{
+ struct efx_vf *vf;
+ unsigned int pos;
+
+ if (efx->vf_init_count == 0)
+ return;
+
+ /* Disable all interfaces to reconfiguration */
+ BUG_ON(efx->vfdi_channel->enabled);
+ efx_sriov_usrev(efx, false);
+ rtnl_lock();
+ efx->vf_init_count = 0;
+ rtnl_unlock();
+
+ /* Flush all reconfiguration work */
+ for (pos = 0; pos < efx->vf_count; ++pos) {
+ vf = efx->vf + pos;
+ cancel_work_sync(&vf->req);
+ cancel_work_sync(&vf->reset_work);
+ }
+ cancel_work_sync(&efx->peer_work);
+
+ pci_disable_sriov(efx->pci_dev);
+
+ /* Tear down back-end state */
+ efx_sriov_vfs_fini(efx);
+ efx_sriov_free_local(efx);
+ kfree(efx->vf);
+ efx_nic_free_buffer(efx, &efx->vfdi_status);
+ efx_sriov_cmd(efx, false, NULL, NULL);
+}
+
+void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_vf *vf;
+ unsigned qid, seq, type, data;
+
+ qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);
+
+ /* USR_EV_REG_VALUE is dword0, so access the VFDI_EV fields directly */
+ BUILD_BUG_ON(FSF_CZ_USER_EV_REG_VALUE_LBN != 0);
+ seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ);
+ type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE);
+ data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "USR_EV event from qid %d seq 0x%x type %d data 0x%x\n",
+ qid, seq, type, data);
+
+ if (map_vi_index(efx, qid, &vf, NULL))
+ return;
+ if (vf->busy)
+ goto error;
+
+ if (type == VFDI_EV_TYPE_REQ_WORD0) {
+ /* Resynchronise */
+ vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
+ vf->req_seqno = seq + 1;
+ vf->req_addr = 0;
+ } else if (seq != (vf->req_seqno++ & 0xff) || type != vf->req_type)
+ goto error;
+
+ switch (vf->req_type) {
+ case VFDI_EV_TYPE_REQ_WORD0:
+ case VFDI_EV_TYPE_REQ_WORD1:
+ case VFDI_EV_TYPE_REQ_WORD2:
+ vf->req_addr |= (u64)data << (vf->req_type << 4);
+ ++vf->req_type;
+ return;
+
+ case VFDI_EV_TYPE_REQ_WORD3:
+ vf->req_addr |= (u64)data << 48;
+ vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
+ vf->busy = true;
+ queue_work(vfdi_workqueue, &vf->req);
+ return;
+ }
+
+error:
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "ERROR: Screaming VFDI request from %s\n",
+ vf->pci_name);
+ /* Reset the request and sequence number */
+ vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
+ vf->req_seqno = seq + 1;
+}
+
+void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i)
+{
+ struct efx_vf *vf;
+
+ if (vf_i > efx->vf_init_count)
+ return;
+ vf = efx->vf + vf_i;
+ netif_info(efx, hw, efx->net_dev,
+ "FLR on VF %s\n", vf->pci_name);
+
+ vf->status_addr = 0;
+ efx_vfdi_remove_all_filters(vf);
+ efx_vfdi_flush_clear(vf);
+
+ vf->evq0_count = 0;
+}
+
+void efx_sriov_mac_address_changed(struct efx_nic *efx)
+{
+ struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
+
+ if (!efx->vf_init_count)
+ return;
+ memcpy(vfdi_status->peers[0].mac_addr,
+ efx->net_dev->dev_addr, ETH_ALEN);
+ queue_work(vfdi_workqueue, &efx->peer_work);
+}
+
+void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+ struct efx_vf *vf;
+ unsigned queue, qid;
+
+ queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+ if (map_vi_index(efx, queue, &vf, &qid))
+ return;
+ /* Ignore flush completions triggered by an FLR */
+ if (!test_bit(qid, vf->txq_mask))
+ return;
+
+ __clear_bit(qid, vf->txq_mask);
+ --vf->txq_count;
+
+ if (efx_vfdi_flush_wake(vf))
+ wake_up(&vf->flush_waitq);
+}
+
+void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+ struct efx_vf *vf;
+ unsigned ev_failed, queue, qid;
+
+ queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ ev_failed = EFX_QWORD_FIELD(*event,
+ FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+ if (map_vi_index(efx, queue, &vf, &qid))
+ return;
+ if (!test_bit(qid, vf->rxq_mask))
+ return;
+
+ if (ev_failed) {
+ set_bit(qid, vf->rxq_retry_mask);
+ atomic_inc(&vf->rxq_retry_count);
+ } else {
+ __clear_bit(qid, vf->rxq_mask);
+ --vf->rxq_count;
+ }
+ if (efx_vfdi_flush_wake(vf))
+ wake_up(&vf->flush_waitq);
+}
+
+/* Called from napi. Schedule the reset work item */
+void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
+{
+ struct efx_vf *vf;
+ unsigned int rel;
+
+ if (map_vi_index(efx, dmaq, &vf, &rel))
+ return;
+
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "VF %d DMA Q %d reports descriptor fetch error.\n",
+ vf->index, rel);
+ queue_work(vfdi_workqueue, &vf->reset_work);
+}
+
+/* Reset all VFs */
+void efx_sriov_reset(struct efx_nic *efx)
+{
+ unsigned int vf_i;
+ struct efx_buffer buf;
+ struct efx_vf *vf;
+
+ ASSERT_RTNL();
+
+ if (efx->vf_init_count == 0)
+ return;
+
+ efx_sriov_usrev(efx, true);
+ (void)efx_sriov_cmd(efx, true, NULL, NULL);
+
+ if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE))
+ return;
+
+ for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
+ vf = efx->vf + vf_i;
+ efx_sriov_reset_vf(vf, &buf);
+ }
+
+ efx_nic_free_buffer(efx, &buf);
+}
+
+int efx_init_sriov(void)
+{
+ /* A single threaded workqueue is sufficient. efx_sriov_vfdi() and
+ * efx_sriov_peer_work() spend almost all their time sleeping for
+ * MCDI to complete anyway
+ */
+ vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
+ if (!vfdi_workqueue)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void efx_fini_sriov(void)
+{
+ destroy_workqueue(vfdi_workqueue);
+}
+
+int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_vf *vf;
+
+ if (vf_i >= efx->vf_init_count)
+ return -EINVAL;
+ vf = efx->vf + vf_i;
+
+ mutex_lock(&vf->status_lock);
+ memcpy(vf->addr.mac_addr, mac, ETH_ALEN);
+ __efx_sriov_update_vf_addr(vf);
+ mutex_unlock(&vf->status_lock);
+
+ return 0;
+}
+
+int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
+ u16 vlan, u8 qos)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_vf *vf;
+ u16 tci;
+
+ if (vf_i >= efx->vf_init_count)
+ return -EINVAL;
+ vf = efx->vf + vf_i;
+
+ mutex_lock(&vf->status_lock);
+ tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
+ vf->addr.tci = htons(tci);
+ __efx_sriov_update_vf_addr(vf);
+ mutex_unlock(&vf->status_lock);
+
+ return 0;
+}
+
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+ bool spoofchk)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_vf *vf;
+ int rc;
+
+ if (vf_i >= efx->vf_init_count)
+ return -EINVAL;
+ vf = efx->vf + vf_i;
+
+ mutex_lock(&vf->txq_lock);
+ if (vf->txq_count == 0) {
+ vf->tx_filter_mode =
+ spoofchk ? VF_TX_FILTER_ON : VF_TX_FILTER_OFF;
+ rc = 0;
+ } else {
+ /* This cannot be changed while TX queues are running */
+ rc = -EBUSY;
+ }
+ mutex_unlock(&vf->txq_lock);
+ return rc;
+}
+
+int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+ struct ifla_vf_info *ivi)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_vf *vf;
+ u16 tci;
+
+ if (vf_i >= efx->vf_init_count)
+ return -EINVAL;
+ vf = efx->vf + vf_i;
+
+ ivi->vf = vf_i;
+ memcpy(ivi->mac, vf->addr.mac_addr, ETH_ALEN);
+ ivi->tx_rate = 0;
+ tci = ntohs(vf->addr.tci);
+ ivi->vlan = tci & VLAN_VID_MASK;
+ ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
+ ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON;
+
+ return 0;
+}
+
diff --git a/drivers/net/ethernet/sfc/spi.h b/drivers/net/ethernet/sfc/spi.h
index 71f2e3ebe1c7..5431a1bbff5c 100644
--- a/drivers/net/ethernet/sfc/spi.h
+++ b/drivers/net/ethernet/sfc/spi.h
@@ -68,7 +68,7 @@ static inline bool efx_spi_present(const struct efx_spi_device *spi)
int falcon_spi_cmd(struct efx_nic *efx,
const struct efx_spi_device *spi, unsigned int command,
- int address, const void* in, void *out, size_t len);
+ int address, const void *in, void *out, size_t len);
int falcon_spi_wait_write(struct efx_nic *efx,
const struct efx_spi_device *spi);
int falcon_spi_read(struct efx_nic *efx,
diff --git a/drivers/net/ethernet/sfc/tenxpress.c b/drivers/net/ethernet/sfc/tenxpress.c
index 7b0fd89e7b85..d37cb5017129 100644
--- a/drivers/net/ethernet/sfc/tenxpress.c
+++ b/drivers/net/ethernet/sfc/tenxpress.c
@@ -121,7 +121,7 @@
#define GPHY_XCONTROL_REG 49152
#define GPHY_ISOLATE_LBN 10
#define GPHY_ISOLATE_WIDTH 1
-#define GPHY_DUPLEX_LBN 8
+#define GPHY_DUPLEX_LBN 8
#define GPHY_DUPLEX_WIDTH 1
#define GPHY_LOOPBACK_NEAR_LBN 14
#define GPHY_LOOPBACK_NEAR_WIDTH 1
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 72f0fbc73b1a..94d0365b31cd 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -110,7 +110,7 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
* little benefit from using descriptors that cross those
* boundaries and we keep things simple by not doing so.
*/
- unsigned len = (~dma_addr & 0xfff) + 1;
+ unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
/* Work around hardware bug for unaligned buffers. */
if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
@@ -339,7 +339,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
* OS to free the skb.
*/
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
- struct net_device *net_dev)
+ struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_tx_queue *tx_queue;
@@ -446,10 +446,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
likely(efx->port_enabled) &&
likely(netif_device_present(efx->net_dev))) {
fill_level = tx_queue->insert_count - tx_queue->read_count;
- if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
- EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
+ if (fill_level < EFX_TXQ_THRESHOLD(efx))
netif_tx_wake_queue(tx_queue->core_txq);
- }
}
/* Check whether the hardware queue is now empty */
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
index 7c21b334a75b..29bb3f9941c0 100644
--- a/drivers/net/ethernet/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -512,7 +512,7 @@ static bool txc43128_phy_poll(struct efx_nic *efx)
return efx->link_state.up != was_up;
}
-static const char *txc43128_test_names[] = {
+static const char *const txc43128_test_names[] = {
"bist"
};
diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h
new file mode 100644
index 000000000000..225557caaf5a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/vfdi.h
@@ -0,0 +1,255 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2010-2012 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef _VFDI_H
+#define _VFDI_H
+
+/**
+ * DOC: Virtual Function Driver Interface
+ *
+ * This file contains software structures used to form a two way
+ * communication channel between the VF driver and the PF driver,
+ * named Virtual Function Driver Interface (VFDI).
+ *
+ * For the purposes of VFDI, a page is a memory region with size and
+ * alignment of 4K. All addresses are DMA addresses to be used within
+ * the domain of the relevant VF.
+ *
+ * The only hardware-defined channels for a VF driver to communicate
+ * with the PF driver are the event mailboxes (%FR_CZ_USR_EV
+ * registers). Writing to these registers generates an event with
+ * EV_CODE = EV_CODE_USR_EV, USER_QID set to the index of the mailbox
+ * and USER_EV_REG_VALUE set to the value written. The PF driver may
+ * direct or disable delivery of these events by setting
+ * %FR_CZ_USR_EV_CFG.
+ *
+ * The PF driver can send arbitrary events to arbitrary event queues.
+ * However, for consistency, VFDI events from the PF are defined to
+ * follow the same form and be sent to the first event queue assigned
+ * to the VF while that queue is enabled by the VF driver.
+ *
+ * The general form of the variable bits of VFDI events is:
+ *
+ * 0 16 24 31
+ * | DATA | TYPE | SEQ |
+ *
+ * SEQ is a sequence number which should be incremented by 1 (modulo
+ * 256) for each event. The sequence numbers used in each direction
+ * are independent.
+ *
+ * The VF submits requests of type &struct vfdi_req by sending the
+ * address of the request (ADDR) in a series of 4 events:
+ *
+ * 0 16 24 31
+ * | ADDR[0:15] | VFDI_EV_TYPE_REQ_WORD0 | SEQ |
+ * | ADDR[16:31] | VFDI_EV_TYPE_REQ_WORD1 | SEQ+1 |
+ * | ADDR[32:47] | VFDI_EV_TYPE_REQ_WORD2 | SEQ+2 |
+ * | ADDR[48:63] | VFDI_EV_TYPE_REQ_WORD3 | SEQ+3 |
+ *
+ * The address must be page-aligned. After receiving such a valid
+ * series of events, the PF driver will attempt to read the request
+ * and write a response to the same address. In case of an invalid
+ * sequence of events or a DMA error, there will be no response.
+ *
+ * The VF driver may request that the PF driver writes status
+ * information into its domain asynchronously. After writing the
+ * status, the PF driver will send an event of the form:
+ *
+ * 0 16 24 31
+ * | reserved | VFDI_EV_TYPE_STATUS | SEQ |
+ *
+ * In case the VF must be reset for any reason, the PF driver will
+ * send an event of the form:
+ *
+ * 0 16 24 31
+ * | reserved | VFDI_EV_TYPE_RESET | SEQ |
+ *
+ * It is then the responsibility of the VF driver to request
+ * reinitialisation of its queues.
+ */
+#define VFDI_EV_SEQ_LBN 24
+#define VFDI_EV_SEQ_WIDTH 8
+#define VFDI_EV_TYPE_LBN 16
+#define VFDI_EV_TYPE_WIDTH 8
+#define VFDI_EV_TYPE_REQ_WORD0 0
+#define VFDI_EV_TYPE_REQ_WORD1 1
+#define VFDI_EV_TYPE_REQ_WORD2 2
+#define VFDI_EV_TYPE_REQ_WORD3 3
+#define VFDI_EV_TYPE_STATUS 4
+#define VFDI_EV_TYPE_RESET 5
+#define VFDI_EV_DATA_LBN 0
+#define VFDI_EV_DATA_WIDTH 16
+
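+/*
+ * Editor's note: the helper below is an illustrative sketch and is not
+ * part of the original patch. It shows how a VF driver might encode the
+ * four USER_EV register values that carry the DMA address of a request,
+ * following the DATA/TYPE/SEQ layout and the REQ_WORD0..3 sequence
+ * described in the DOC comment above. Only constants defined in this
+ * header are used; the caller is assumed to write each value to its
+ * FR_CZ_USR_EV mailbox register in order.
+ */
+static inline void vfdi_encode_req_events(u64 req_dma_addr, u8 first_seq,
+ u32 ev_value[4])
+{
+ unsigned int word;
+
+ for (word = 0; word < 4; word++)
+ ev_value[word] =
+ (u32)((req_dma_addr >> (16 * word)) & 0xffff) << VFDI_EV_DATA_LBN |
+ (VFDI_EV_TYPE_REQ_WORD0 + word) << VFDI_EV_TYPE_LBN |
+ ((first_seq + word) & 0xff) << VFDI_EV_SEQ_LBN;
+}
+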
+struct vfdi_endpoint {
+ u8 mac_addr[ETH_ALEN];
+ __be16 tci;
+};
+
+/**
+ * enum vfdi_op - VFDI operation enumeration
+ * @VFDI_OP_RESPONSE: Indicates a response to the request.
+ * @VFDI_OP_INIT_EVQ: Initialize SRAM entries and initialize an EVQ.
+ * @VFDI_OP_INIT_RXQ: Initialize SRAM entries and initialize an RXQ.
+ * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ.
+ * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then
+ * finalize the SRAM entries.
+ * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ.
+ * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters.
+ * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates
+ * from PF and write the initial status.
+ * @VFDI_OP_CLEAR_STATUS_PAGE: Clear the DMA page(s) used for status
+ * updates from PF.
+ */
+enum vfdi_op {
+ VFDI_OP_RESPONSE = 0,
+ VFDI_OP_INIT_EVQ = 1,
+ VFDI_OP_INIT_RXQ = 2,
+ VFDI_OP_INIT_TXQ = 3,
+ VFDI_OP_FINI_ALL_QUEUES = 4,
+ VFDI_OP_INSERT_FILTER = 5,
+ VFDI_OP_REMOVE_ALL_FILTERS = 6,
+ VFDI_OP_SET_STATUS_PAGE = 7,
+ VFDI_OP_CLEAR_STATUS_PAGE = 8,
+ VFDI_OP_LIMIT,
+};
+
+/* Response codes for VFDI operations. Other values may be used in future. */
+#define VFDI_RC_SUCCESS 0
+#define VFDI_RC_ENOMEM (-12)
+#define VFDI_RC_EINVAL (-22)
+#define VFDI_RC_EOPNOTSUPP (-95)
+#define VFDI_RC_ETIMEDOUT (-110)
+
+/**
+ * struct vfdi_req - Request from VF driver to PF driver
+ * @op: Operation code or response indicator, taken from &enum vfdi_op.
+ * @rc: Response code. Set to 0 on success or a negative error code on failure.
+ * @u.init_evq.index: Index of event queue to create.
+ * @u.init_evq.buf_count: Number of 4k buffers backing event queue.
+ * @u.init_evq.addr: Array of length %u.init_evq.buf_count containing DMA
+ * address of each page backing the event queue.
+ * @u.init_rxq.index: Index of receive queue to create.
+ * @u.init_rxq.buf_count: Number of 4k buffers backing receive queue.
+ * @u.init_rxq.evq: Instance of event queue to target receive events at.
+ * @u.init_rxq.label: Label used in receive events.
+ * @u.init_rxq.flags: Unused.
+ * @u.init_rxq.addr: Array of length %u.init_rxq.buf_count containing DMA
+ * address of each page backing the receive queue.
+ * @u.init_txq.index: Index of transmit queue to create.
+ * @u.init_txq.buf_count: Number of 4k buffers backing transmit queue.
+ * @u.init_txq.evq: Instance of event queue to target transmit completion
+ * events at.
+ * @u.init_txq.label: Label used in transmit completion events.
+ * @u.init_txq.flags: Checksum offload flags.
+ * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA
+ * address of each page backing the transmit queue.
+ * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting
+ * all traffic at this receive queue.
+ * @u.mac_filter.flags: MAC filter flags.
+ * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status.
+ * This address must be page-aligned and the PF may write up to a
+ * whole page (allowing for extension of the structure).
+ * @u.set_status_page.peer_page_count: Number of additional pages the VF
+ * has provided into which peer addresses may be DMA'd.
+ * @u.set_status_page.peer_page_addr: Array of DMA addresses of pages.
+ * If the number of peers exceeds 256, then the VF must provide
+ * additional pages in this array. The PF will then DMA up to
+ * 512 vfdi_endpoint structures into each page. These addresses
+ * must be page-aligned.
+ */
+struct vfdi_req {
+ u32 op;
+ u32 reserved1;
+ s32 rc;
+ u32 reserved2;
+ union {
+ struct {
+ u32 index;
+ u32 buf_count;
+ u64 addr[];
+ } init_evq;
+ struct {
+ u32 index;
+ u32 buf_count;
+ u32 evq;
+ u32 label;
+ u32 flags;
+#define VFDI_RXQ_FLAG_SCATTER_EN 1
+ u32 reserved;
+ u64 addr[];
+ } init_rxq;
+ struct {
+ u32 index;
+ u32 buf_count;
+ u32 evq;
+ u32 label;
+ u32 flags;
+#define VFDI_TXQ_FLAG_IP_CSUM_DIS 1
+#define VFDI_TXQ_FLAG_TCPUDP_CSUM_DIS 2
+ u32 reserved;
+ u64 addr[];
+ } init_txq;
+ struct {
+ u32 rxq;
+ u32 flags;
+#define VFDI_MAC_FILTER_FLAG_RSS 1
+#define VFDI_MAC_FILTER_FLAG_SCATTER 2
+ } mac_filter;
+ struct {
+ u64 dma_addr;
+ u64 peer_page_count;
+ u64 peer_page_addr[];
+ } set_status_page;
+ } u;
+};
+
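+/*
+ * Editor's note: illustrative sketch only, not part of the original
+ * patch. One way a VF driver might populate a VFDI_OP_INIT_EVQ request
+ * in its DMA-coherent request page before posting the page address with
+ * the four REQ_WORD events. The caller is assumed to have sized the
+ * page so that addr[buf_count - 1] fits, and memset() is assumed to be
+ * available from the usual kernel headers.
+ */
+static inline void vfdi_fill_init_evq(struct vfdi_req *req, u32 evq_index,
+ u32 buf_count, const u64 *buf_addr)
+{
+ u32 i;
+
+ memset(req, 0, sizeof(*req));
+ req->op = VFDI_OP_INIT_EVQ;
+ req->u.init_evq.index = evq_index;
+ req->u.init_evq.buf_count = buf_count;
+ for (i = 0; i < buf_count; i++)
+ req->u.init_evq.addr[i] = buf_addr[i];
+}
+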
+/**
+ * struct vfdi_status - Status provided by PF driver to VF driver
+ * @generation_start: A generation count DMA'd to VF *before* the
+ * rest of the structure.
+ * @generation_end: A generation count DMA'd to VF *after* the
+ * rest of the structure.
+ * @version: Version of this structure; currently set to 1. Later
+ * versions must either be layout-compatible or only be sent to VFs
+ * that specifically request them.
+ * @length: Total length of this structure including embedded tables
+ * @vi_scale: log2 of the number of VIs available on this VF. This quantity
+ * is used by the hardware for register decoding.
+ * @max_tx_channels: The maximum number of transmit queues the VF can use.
+ * @rss_rxq_count: The number of receive queues present in the shared RSS
+ * indirection table.
+ * @peer_count: Total number of peers in the complete peer list. If larger
+ * than ARRAY_SIZE(%peers), then the VF must provide sufficient
+ * additional pages each of which is filled with vfdi_endpoint structures.
+ * @local: The MAC address and outer VLAN tag of *this* VF
+ * @peers: Table of peer addresses. The @tci fields in these structures
+ * are currently unused and must be ignored. Additional peers are
+ * written into any additional pages provided by the VF.
+ * @timer_quantum_ns: Timer quantum (nominal period between timer ticks)
+ * for interrupt moderation timers, in nanoseconds. This member is only
+ * present if @length is sufficiently large.
+ */
+struct vfdi_status {
+ u32 generation_start;
+ u32 generation_end;
+ u32 version;
+ u32 length;
+ u8 vi_scale;
+ u8 max_tx_channels;
+ u8 rss_rxq_count;
+ u8 reserved1;
+ u16 peer_count;
+ u16 reserved2;
+ struct vfdi_endpoint local;
+ struct vfdi_endpoint peers[256];
+
+ /* Members below here extend version 1 of this structure */
+ u32 timer_quantum_ns;
+};
+
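+/*
+ * Editor's note: illustrative sketch only, not part of the original
+ * patch. Because the PF DMAs @generation_start before the body and
+ * @generation_end after it, a VF driver can detect a torn status update
+ * by reading the end count before copying and the start count after
+ * copying: the copy is consistent only if the two match. rmb() and
+ * memcpy() are assumed to come from the usual kernel headers; the
+ * caller retries on a mismatch.
+ */
+static inline bool vfdi_status_snapshot(const struct vfdi_status *dma_status,
+ struct vfdi_status *copy)
+{
+ u32 end, start;
+
+ end = dma_status->generation_end;
+ rmb(); /* read the body only after the end count */
+ memcpy(copy, dma_status, sizeof(*copy));
+ rmb(); /* read the start count only after the body */
+ start = dma_status->generation_start;
+
+ return start == end;
+}
+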
+#endif
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 5b118cd5bf94..a9deda8eaf63 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1462,8 +1462,6 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
dev = alloc_etherdev(sizeof(*tp));
if (!dev) {
- if (netif_msg_drv(&debug))
- pr_err("unable to alloc new ethernet\n");
rc = -ENOMEM;
goto err_out_0;
}
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index c8efc708c792..5ccf02e7e3ad 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -527,7 +527,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
ret = sis900_get_mac_addr(pci_dev, net_dev);
if (!ret || !is_valid_ether_addr(net_dev->dev_addr)) {
- random_ether_addr(net_dev->dev_addr);
+ eth_hw_addr_random(net_dev);
printk(KERN_WARNING "%s: Unreadable or invalid MAC address,"
"using random generated one\n", dev_name);
}
@@ -619,7 +619,6 @@ static int __devinit sis900_mii_probe(struct net_device * net_dev)
}
if ((mii_phy = kmalloc(sizeof(struct mii_phy), GFP_KERNEL)) == NULL) {
- printk(KERN_WARNING "Cannot allocate mem for struct mii_phy\n");
mii_phy = sis_priv->first_mii;
while (mii_phy) {
struct mii_phy *phy;
@@ -1167,7 +1166,7 @@ sis900_init_rx_ring(struct net_device *net_dev)
for (i = 0; i < NUM_RX_DESC; i++) {
struct sk_buff *skb;
- if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+ if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
/* not enough memory for skbuff, this makes a "hole"
on the buffer ring, it is not clear how the
hardware will react to this kind of degenerated
@@ -1770,7 +1769,7 @@ static int sis900_rx(struct net_device *net_dev)
/* refill the Rx buffer, what if there is not enough
* memory for new socket buffer ?? */
- if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+ if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
/*
* Not enough memory to refill the buffer
* so we need to recycle the old one so
@@ -1828,7 +1827,7 @@ refill_rx_ring:
entry = sis_priv->dirty_rx % NUM_RX_DESC;
if (sis_priv->rx_skbuff[entry] == NULL) {
- if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+ if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
/* not enough memory for skbuff, this makes a
* "hole" on the buffer ring, it is not clear
* how the hardware will react to this kind
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 2c077ce0b6d6..2a662e6112e9 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -363,10 +363,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
ret = -ENOMEM;
dev = alloc_etherdev(sizeof (*ep));
- if (!dev) {
- dev_err(&pdev->dev, "no memory for eth device\n");
+ if (!dev)
goto err_out_free_res;
- }
+
SET_NETDEV_DEV(dev, &pdev->dev);
#ifdef USE_IO_OPS
@@ -935,7 +934,7 @@ static void epic_init_ring(struct net_device *dev)
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz + 2);
+ struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
ep->rx_skbuff[i] = skb;
if (skb == NULL)
break;
@@ -1200,7 +1199,7 @@ static int epic_rx(struct net_device *dev, int budget)
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
if (pkt_len < rx_copybreak &&
- (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
pci_dma_sync_single_for_cpu(ep->pci_dev,
ep->rx_ring[entry].bufaddr,
@@ -1233,7 +1232,7 @@ static int epic_rx(struct net_device *dev, int budget)
entry = ep->dirty_rx % RX_RING_SIZE;
if (ep->rx_skbuff[entry] == NULL) {
struct sk_buff *skb;
- skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz + 2);
+ skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
if (skb == NULL)
break;
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 313ba3b32ab4..8814b2f5d46f 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -401,7 +401,7 @@ static inline void smc911x_rcv(struct net_device *dev)
} else {
/* Receive a valid packet */
/* Alloc a buffer with extra room for DMA alignment */
- skb=dev_alloc_skb(pkt_len+32);
+ skb = netdev_alloc_skb(dev, pkt_len+32);
if (unlikely(skb == NULL)) {
PRINTK( "%s: Low memory, rcvd packet dropped.\n",
dev->name);
@@ -2065,7 +2065,6 @@ static int __devinit smc911x_drv_probe(struct platform_device *pdev)
ndev = alloc_etherdev(sizeof(struct smc911x_local));
if (!ndev) {
- printk("%s: could not allocate device.\n", CARDNAME);
ret = -ENOMEM;
goto release_1;
}
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 4e45094efb18..50823da9dc1e 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -1222,7 +1222,7 @@ static void smc_rcv(struct net_device *dev)
if ( status & RS_MULTICAST )
dev->stats.multicast++;
- skb = dev_alloc_skb( packet_length + 5);
+ skb = netdev_alloc_skb(dev, packet_length + 5);
if ( skb == NULL ) {
printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n");
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index ada927aba7a5..d12e48a7861d 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1500,7 +1500,7 @@ static void smc_rx(struct net_device *dev)
struct sk_buff *skb;
/* Note: packet_length adds 5 or 6 extra bytes here! */
- skb = dev_alloc_skb(packet_length+2);
+ skb = netdev_alloc_skb(dev, packet_length+2);
if (skb == NULL) {
pr_debug("%s: Low memory, packet dropped.\n", dev->name);
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 64ad3ed74495..1dc4fad593e7 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -463,7 +463,7 @@ static inline void smc_rcv(struct net_device *dev)
* multiple of 4 bytes on 32 bit buses.
* Hence packet_len - 6 + 2 + 2 + 2.
*/
- skb = dev_alloc_skb(packet_len);
+ skb = netdev_alloc_skb(dev, packet_len);
if (unlikely(skb == NULL)) {
printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
dev->name);
@@ -2223,7 +2223,6 @@ static int __devinit smc_drv_probe(struct platform_device *pdev)
ndev = alloc_etherdev(sizeof(struct smc_local));
if (!ndev) {
- printk("%s: could not allocate device.\n", CARDNAME);
ret = -ENOMEM;
goto out;
}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 24d2df068d71..4a6971027076 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1833,6 +1833,7 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
spin_lock_irq(&pdata->mac_lock);
@@ -2374,7 +2375,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
dev = alloc_etherdev(sizeof(struct smsc911x_data));
if (!dev) {
- pr_warn("Could not allocate device\n");
retval = -ENOMEM;
goto out_release_io_1;
}
@@ -2486,7 +2486,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
"Mac Address is read from LAN911x EEPROM");
} else {
/* eeprom values are invalid, generate random MAC */
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
SMSC_TRACE(pdata, probe,
"MAC Address is set to random_ether_addr");
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index a9efbdfe5302..38386478532b 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -509,10 +509,9 @@ static void smsc9420_check_mac_address(struct net_device *dev)
smsc_dbg(PROBE, "Mac Address is read from EEPROM");
} else {
/* eeprom values are invalid, generate random MAC */
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
smsc9420_set_mac_address(dev);
- smsc_dbg(PROBE,
- "MAC Address is set to random_ether_addr");
+ smsc_dbg(PROBE, "MAC Address is set to random");
}
}
}
@@ -850,8 +849,6 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
return -ENOMEM;
}
- skb->dev = pd->dev;
-
mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb),
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pd->pdev, mapping)) {
@@ -1598,10 +1595,8 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
dev = alloc_etherdev(sizeof(*pd));
- if (!dev) {
- printk(KERN_ERR "ether device alloc failed\n");
+ if (!dev)
goto out_disable_pci_device_1;
- }
SET_NETDEV_DEV(dev, &pdev->dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6ee593a55a64..e85ffbd54830 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -878,7 +878,7 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
priv->dev->base_addr,
priv->dev->dev_addr, 0);
if (!is_valid_ether_addr(priv->dev->dev_addr))
- random_ether_addr(priv->dev->dev_addr);
+ eth_hw_addr_random(priv->dev);
}
pr_warning("%s: device MAC address %pM\n", priv->dev->name,
priv->dev->dev_addr);
@@ -910,10 +910,9 @@ static int stmmac_open(struct net_device *dev)
#ifdef CONFIG_STMMAC_TIMER
priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
- if (unlikely(priv->tm == NULL)) {
- pr_err("%s: ERROR: timer memory alloc failed\n", __func__);
+ if (unlikely(priv->tm == NULL))
return -ENOMEM;
- }
+
priv->tm->freq = tmrate;
/* Test if the external timer can be actually used.
@@ -1801,10 +1800,8 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
struct stmmac_priv *priv;
ndev = alloc_etherdev(sizeof(struct stmmac_priv));
- if (!ndev) {
- pr_err("%s: ERROR: allocating the device\n", __func__);
+ if (!ndev)
return NULL;
- }
SET_NETDEV_DEV(ndev, device);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 50ad5b80cfaf..da66ed7c3c5d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -85,7 +85,7 @@ static int __devinit stmmac_pci_probe(struct pci_dev *pdev,
continue;
addr = pci_iomap(pdev, i, 0);
if (addr == NULL) {
- pr_err("%s: ERROR: cannot map regiser memory, aborting",
+ pr_err("%s: ERROR: cannot map register memory, aborting",
__func__);
ret = -EIO;
goto err_out_map_failed;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3aad9810237c..116529a366b2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -24,8 +24,48 @@
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
#include "stmmac.h"
+#ifdef CONFIG_OF
+static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat,
+ const char **mac)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ *mac = of_get_mac_address(np);
+ plat->interface = of_get_phy_mode(np);
+ plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+ sizeof(struct stmmac_mdio_bus_data),
+ GFP_KERNEL);
+
+ /*
+ * Currently only the properties needed on SPEAr600
+ * are provided. All other properties should be added
+ * once needed on other platforms.
+ */
+ if (of_device_is_compatible(np, "st,spear600-gmac")) {
+ plat->pbl = 8;
+ plat->has_gmac = 1;
+ plat->pmt = 1;
+ }
+
+ return 0;
+}
+#else
+static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat,
+ const char **mac)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_OF */
+
/**
* stmmac_pltfr_probe
* @pdev: platform device pointer
@@ -39,7 +79,8 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *addr = NULL;
struct stmmac_priv *priv = NULL;
- struct plat_stmmacenet_data *plat_dat;
+ struct plat_stmmacenet_data *plat_dat = NULL;
+ const char *mac = NULL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
@@ -58,7 +99,25 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto out_release_region;
}
- plat_dat = pdev->dev.platform_data;
+
+ if (pdev->dev.of_node) {
+ plat_dat = devm_kzalloc(&pdev->dev,
+ sizeof(struct plat_stmmacenet_data),
+ GFP_KERNEL);
+ if (!plat_dat) {
+ pr_err("%s: ERROR: no memory", __func__);
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+ ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
+ if (ret) {
+ pr_err("%s: main dt probe failed", __func__);
+ goto out_unmap;
+ }
+ } else {
+ plat_dat = pdev->dev.platform_data;
+ }
/* Custom initialisation (if needed)*/
if (plat_dat->init) {
@@ -73,6 +132,10 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
goto out_unmap;
}
+ /* Get MAC address if available (DT) */
+ if (mac)
+ memcpy(priv->dev->dev_addr, mac, ETH_ALEN);
+
/* Get the MAC information */
priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
if (priv->dev->irq == -ENXIO) {
@@ -178,6 +241,12 @@ static const struct dev_pm_ops stmmac_pltfr_pm_ops = {
static const struct dev_pm_ops stmmac_pltfr_pm_ops;
#endif /* CONFIG_PM */
+static const struct of_device_id stmmac_dt_ids[] = {
+ { .compatible = "st,spear600-gmac", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
+
static struct platform_driver stmmac_driver = {
.probe = stmmac_pltfr_probe,
.remove = stmmac_pltfr_remove,
@@ -185,6 +254,7 @@ static struct platform_driver stmmac_driver = {
.name = STMMAC_RESOURCE_NAME,
.owner = THIS_MODULE,
.pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = of_match_ptr(stmmac_dt_ids),
},
};
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index f10665f594c4..b36edbd625dd 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -835,7 +835,6 @@ static int cas_saturn_firmware_init(struct cas *cp)
cp->fw_data = vmalloc(cp->fw_size);
if (!cp->fw_data) {
err = -ENOMEM;
- pr_err("\"%s\" Failed %d\n", fw_name, err);
goto out;
}
memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
@@ -1975,7 +1974,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
else
alloclen = max(hlen, RX_COPY_MIN);
- skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
+ skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
if (skb == NULL)
return -1;
@@ -4947,7 +4946,6 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(*cp));
if (!dev) {
- dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto err_out_disable_pdev;
}
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index cf433931304f..c99b3b0e2eae 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6412,7 +6412,7 @@ static int niu_set_mac_addr(struct net_device *dev, void *p)
unsigned long flags;
if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -9685,10 +9685,8 @@ static struct net_device * __devinit niu_alloc_and_init(
struct niu *np;
dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
- if (!dev) {
- dev_err(gen_dev, "Etherdev alloc failed, aborting\n");
+ if (!dev)
return NULL;
- }
SET_NETDEV_DEV(dev, gen_dev);
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 220f724c3377..f359863b5340 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -853,7 +853,7 @@ static void bigmac_rx(struct bigmac *bp)
/* Trim the original skb for the netif. */
skb_trim(skb, len);
} else {
- struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
+ struct sk_buff *copy_skb = netdev_alloc_skb(bp->dev, len + 2);
if (copy_skb == NULL) {
drops++;
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 31441a870b0b..ba041596e046 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2885,7 +2885,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(*gp));
if (!dev) {
- pr_err("Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto err_disable_device;
}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 09c518655db2..8b627e2f798d 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2043,7 +2043,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
/* Trim the original skb for the netif. */
skb_trim(skb, len);
} else {
- struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
+ struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
if (copy_skb == NULL) {
drops++;
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index b28f74367ebe..139d6b410d68 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -435,7 +435,7 @@ static void qe_rx(struct sunqe *qep)
dev->stats.rx_length_errors++;
dev->stats.rx_dropped++;
} else {
- skb = dev_alloc_skb(len + 2);
+ skb = netdev_alloc_skb(dev, len + 2);
if (skb == NULL) {
drops++;
dev->stats.rx_dropped++;
@@ -907,14 +907,8 @@ static int __devinit qec_ether_init(struct platform_device *op)
dev_set_drvdata(&op->dev, qe);
- printk(KERN_INFO "%s: qe channel[%d] ", dev->name, qe->channel);
- for (i = 0; i < 6; i++)
- printk ("%2.2x%c",
- dev->dev_addr[i],
- i == 5 ? ' ': ':');
- printk("\n");
-
-
+ printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel,
+ dev->dev_addr);
return 0;
fail:
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 8c6c059f3489..92a037a8228a 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -949,10 +949,9 @@ static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
int map_len = (ETH_FRAME_LEN + 7) & ~7;
err = -ENOMEM;
- if (!buf) {
- pr_err("TX buffer allocation failure\n");
+ if (!buf)
goto err_out;
- }
+
err = -EFAULT;
if ((unsigned long)buf & (8UL - 1)) {
pr_err("TX buffer misaligned\n");
@@ -1027,10 +1026,8 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
int err, i;
dev = alloc_etherdev(sizeof(*vp));
- if (!dev) {
- pr_err("Etherdev alloc failed, aborting\n");
+ if (!dev)
return ERR_PTR(-ENOMEM);
- }
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
@@ -1165,10 +1162,8 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
port = kzalloc(sizeof(*port), GFP_KERNEL);
err = -ENOMEM;
- if (!port) {
- pr_err("Cannot allocate vnet_port\n");
+ if (!port)
goto err_out_put_mdesc;
- }
for (i = 0; i < ETH_ALEN; i++)
port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 4b19e9b0606b..ad973ffc9ff3 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1089,12 +1089,11 @@ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
ENTER;
dno = bdx_rxdb_available(db) - 1;
while (dno > 0) {
- skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN);
+ skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN);
if (!skb) {
- pr_err("NO MEM: dev_alloc_skb failed\n");
+ pr_err("NO MEM: netdev_alloc_skb failed\n");
break;
}
- skb->dev = priv->ndev;
skb_reserve(skb, NET_IP_ALIGN);
idx = bdx_rxdb_alloc_elem(db);
@@ -1258,7 +1257,7 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
skb = dm->skb;
if (len < BDX_COPYBREAK &&
- (skb2 = dev_alloc_skb(len + NET_IP_ALIGN))) {
+ (skb2 = netdev_alloc_skb(priv->ndev, len + NET_IP_ALIGN))) {
skb_reserve(skb2, NET_IP_ALIGN);
/*skb_put(skb2, len); */
pci_dma_sync_single_for_cpu(priv->pdev,
@@ -1978,7 +1977,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ndev = alloc_etherdev(sizeof(struct bdx_priv));
if (!ndev) {
err = -ENOMEM;
- pr_err("alloc_etherdev failed\n");
goto err_out_iomap;
}
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index de76c70ec8fb..b42252c4bec8 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -49,6 +49,17 @@ config TI_DAVINCI_CPDMA
To compile this driver as a module, choose M here: the module
will be called davinci_cpdma. This is recommended.
+config TI_CPSW
+ tristate "TI CPSW Switch Support"
+ depends on ARM && (ARCH_DAVINCI || SOC_OMAPAM33XX)
+ select TI_DAVINCI_CPDMA
+ select TI_DAVINCI_MDIO
+ ---help---
+ This driver supports TI's CPSW Ethernet Switch.
+
+ To compile this driver as a module, choose M here: the module
+ will be called cpsw.
+
config TLAN
tristate "TI ThunderLAN support"
depends on (PCI || EISA)
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index aedb3af74e5a..91bd8bba78ff 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_CPMAC) += cpmac.o
obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
+obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
+ti_cpsw-y := cpsw_ale.o cpsw.o
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index cbc8df78d84b..860c2526f08d 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1143,11 +1143,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
}
dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
-
- if (!dev) {
- printk(KERN_ERR "cpmac: Unable to allocate net_device\n");
+ if (!dev)
return -ENOMEM;
- }
platform_set_drvdata(pdev, dev);
priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
new file mode 100644
index 000000000000..6685bbb5705a
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -0,0 +1,1019 @@
+/*
+ * Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+
+#include <linux/platform_data/cpsw.h>
+
+#include "cpsw_ale.h"
+#include "davinci_cpdma.h"
+
+#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
+ NETIF_MSG_DRV | NETIF_MSG_LINK | \
+ NETIF_MSG_IFUP | NETIF_MSG_INTR | \
+ NETIF_MSG_PROBE | NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
+ NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_RX_STATUS)
+
+#define cpsw_info(priv, type, format, ...) \
+do { \
+ if (netif_msg_##type(priv) && net_ratelimit()) \
+ dev_info(priv->dev, format, ## __VA_ARGS__); \
+} while (0)
+
+#define cpsw_err(priv, type, format, ...) \
+do { \
+ if (netif_msg_##type(priv) && net_ratelimit()) \
+ dev_err(priv->dev, format, ## __VA_ARGS__); \
+} while (0)
+
+#define cpsw_dbg(priv, type, format, ...) \
+do { \
+ if (netif_msg_##type(priv) && net_ratelimit()) \
+ dev_dbg(priv->dev, format, ## __VA_ARGS__); \
+} while (0)
+
+#define cpsw_notice(priv, type, format, ...) \
+do { \
+ if (netif_msg_##type(priv) && net_ratelimit()) \
+ dev_notice(priv->dev, format, ## __VA_ARGS__); \
+} while (0)
+
+#define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
+#define CPSW_MINOR_VERSION(reg) (reg & 0xff)
+#define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
+
+#define CPDMA_RXTHRESH 0x0c0
+#define CPDMA_RXFREE 0x0e0
+#define CPDMA_TXHDP 0x00
+#define CPDMA_RXHDP 0x20
+#define CPDMA_TXCP 0x40
+#define CPDMA_RXCP 0x60
+
+#define cpsw_dma_regs(base, offset) \
+ (void __iomem *)((base) + (offset))
+#define cpsw_dma_rxthresh(base, offset) \
+ (void __iomem *)((base) + (offset) + CPDMA_RXTHRESH)
+#define cpsw_dma_rxfree(base, offset) \
+ (void __iomem *)((base) + (offset) + CPDMA_RXFREE)
+#define cpsw_dma_txhdp(base, offset) \
+ (void __iomem *)((base) + (offset) + CPDMA_TXHDP)
+#define cpsw_dma_rxhdp(base, offset) \
+ (void __iomem *)((base) + (offset) + CPDMA_RXHDP)
+#define cpsw_dma_txcp(base, offset) \
+ (void __iomem *)((base) + (offset) + CPDMA_TXCP)
+#define cpsw_dma_rxcp(base, offset) \
+ (void __iomem *)((base) + (offset) + CPDMA_RXCP)
+
+#define CPSW_POLL_WEIGHT 64
+#define CPSW_MIN_PACKET_SIZE 60
+#define CPSW_MAX_PACKET_SIZE (1500 + 14 + 4 + 4)
+
+#define RX_PRIORITY_MAPPING 0x76543210
+#define TX_PRIORITY_MAPPING 0x33221100
+#define CPDMA_TX_PRIORITY_MAP 0x76543210
+
+#define cpsw_enable_irq(priv) \
+ do { \
+ u32 i; \
+ for (i = 0; i < priv->num_irqs; i++) \
+ enable_irq(priv->irqs_table[i]); \
+ } while (0)
+#define cpsw_disable_irq(priv) \
+ do { \
+ u32 i; \
+ for (i = 0; i < priv->num_irqs; i++) \
+ disable_irq_nosync(priv->irqs_table[i]); \
+ } while (0)
+
+static int debug_level;
+module_param(debug_level, int, 0);
+MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
+
+static int ale_ageout = 10;
+module_param(ale_ageout, int, 0);
+MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");
+
+static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
+module_param(rx_packet_max, int, 0);
+MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
+
+struct cpsw_ss_regs {
+ u32 id_ver;
+ u32 soft_reset;
+ u32 control;
+ u32 int_control;
+ u32 rx_thresh_en;
+ u32 rx_en;
+ u32 tx_en;
+ u32 misc_en;
+};
+
+struct cpsw_regs {
+ u32 id_ver;
+ u32 control;
+ u32 soft_reset;
+ u32 stat_port_en;
+ u32 ptype;
+};
+
+struct cpsw_slave_regs {
+ u32 max_blks;
+ u32 blk_cnt;
+ u32 flow_thresh;
+ u32 port_vlan;
+ u32 tx_pri_map;
+ u32 ts_ctl;
+ u32 ts_seq_ltype;
+ u32 ts_vlan;
+ u32 sa_lo;
+ u32 sa_hi;
+};
+
+struct cpsw_host_regs {
+ u32 max_blks;
+ u32 blk_cnt;
+ u32 flow_thresh;
+ u32 port_vlan;
+ u32 tx_pri_map;
+ u32 cpdma_tx_pri_map;
+ u32 cpdma_rx_chan_map;
+};
+
+struct cpsw_sliver_regs {
+ u32 id_ver;
+ u32 mac_control;
+ u32 mac_status;
+ u32 soft_reset;
+ u32 rx_maxlen;
+ u32 __reserved_0;
+ u32 rx_pause;
+ u32 tx_pause;
+ u32 __reserved_1;
+ u32 rx_pri_map;
+};
+
+struct cpsw_slave {
+ struct cpsw_slave_regs __iomem *regs;
+ struct cpsw_sliver_regs __iomem *sliver;
+ int slave_num;
+ u32 mac_control;
+ struct cpsw_slave_data *data;
+ struct phy_device *phy;
+};
+
+struct cpsw_priv {
+ spinlock_t lock;
+ struct platform_device *pdev;
+ struct net_device *ndev;
+ struct resource *cpsw_res;
+ struct resource *cpsw_ss_res;
+ struct napi_struct napi;
+ struct device *dev;
+ struct cpsw_platform_data data;
+ struct cpsw_regs __iomem *regs;
+ struct cpsw_ss_regs __iomem *ss_regs;
+ struct cpsw_host_regs __iomem *host_port_regs;
+ u32 msg_enable;
+ struct net_device_stats stats;
+ int rx_packet_max;
+ int host_port;
+ struct clk *clk;
+ u8 mac_addr[ETH_ALEN];
+ struct cpsw_slave *slaves;
+ struct cpdma_ctlr *dma;
+ struct cpdma_chan *txch, *rxch;
+ struct cpsw_ale *ale;
+ /* snapshot of IRQ numbers */
+ u32 irqs_table[4];
+ u32 num_irqs;
+};
+
+#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
+#define for_each_slave(priv, func, arg...) \
+ do { \
+ int idx; \
+ for (idx = 0; idx < (priv)->data.slaves; idx++) \
+ (func)((priv)->slaves + idx, ##arg); \
+ } while (0)
+
+static void cpsw_intr_enable(struct cpsw_priv *priv)
+{
+ __raw_writel(0xFF, &priv->ss_regs->tx_en);
+ __raw_writel(0xFF, &priv->ss_regs->rx_en);
+
+ cpdma_ctlr_int_ctrl(priv->dma, true);
+ return;
+}
+
+static void cpsw_intr_disable(struct cpsw_priv *priv)
+{
+ __raw_writel(0, &priv->ss_regs->tx_en);
+ __raw_writel(0, &priv->ss_regs->rx_en);
+
+ cpdma_ctlr_int_ctrl(priv->dma, false);
+ return;
+}
+
+void cpsw_tx_handler(void *token, int len, int status)
+{
+ struct sk_buff *skb = token;
+ struct net_device *ndev = skb->dev;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ if (unlikely(netif_queue_stopped(ndev)))
+ netif_start_queue(ndev);
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += len;
+ dev_kfree_skb_any(skb);
+}
+
+void cpsw_rx_handler(void *token, int len, int status)
+{
+ struct sk_buff *skb = token;
+ struct net_device *ndev = skb->dev;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret = 0;
+
+ /* free and bail if we are shutting down */
+ if (unlikely(!netif_running(ndev)) ||
+ unlikely(!netif_carrier_ok(ndev))) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ if (likely(status >= 0)) {
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+ priv->stats.rx_bytes += len;
+ priv->stats.rx_packets++;
+ skb = NULL;
+ }
+
+ if (unlikely(!netif_running(ndev))) {
+ if (skb)
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (likely(!skb)) {
+ skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
+ if (WARN_ON(!skb))
+ return;
+
+ ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
+ skb_tailroom(skb), GFP_KERNEL);
+ }
+ WARN_ON(ret < 0);
+}
+
+static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
+{
+ struct cpsw_priv *priv = dev_id;
+
+ if (likely(netif_running(priv->ndev))) {
+ cpsw_intr_disable(priv);
+ cpsw_disable_irq(priv);
+ napi_schedule(&priv->napi);
+ }
+ return IRQ_HANDLED;
+}
+
+static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
+{
+ if (priv->host_port == 0)
+ return slave_num + 1;
+ else
+ return slave_num;
+}
+
+static int cpsw_poll(struct napi_struct *napi, int budget)
+{
+ struct cpsw_priv *priv = napi_to_priv(napi);
+ int num_tx, num_rx;
+
+ num_tx = cpdma_chan_process(priv->txch, 128);
+ num_rx = cpdma_chan_process(priv->rxch, budget);
+
+ if (num_rx || num_tx)
+ cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
+ num_rx, num_tx);
+
+ if (num_rx < budget) {
+ napi_complete(napi);
+ cpsw_intr_enable(priv);
+ cpdma_ctlr_eoi(priv->dma);
+ cpsw_enable_irq(priv);
+ }
+
+ return num_rx;
+}
+
+static inline void soft_reset(const char *module, void __iomem *reg)
+{
+ unsigned long timeout = jiffies + HZ;
+
+ __raw_writel(1, reg);
+ do {
+ cpu_relax();
+ } while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));
+
+ WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
+}
+
+#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
+ ((mac)[2] << 16) | ((mac)[3] << 24))
+#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
+
+static void cpsw_set_slave_mac(struct cpsw_slave *slave,
+ struct cpsw_priv *priv)
+{
+ __raw_writel(mac_hi(priv->mac_addr), &slave->regs->sa_hi);
+ __raw_writel(mac_lo(priv->mac_addr), &slave->regs->sa_lo);
+}
+
+static void _cpsw_adjust_link(struct cpsw_slave *slave,
+ struct cpsw_priv *priv, bool *link)
+{
+ struct phy_device *phy = slave->phy;
+ u32 mac_control = 0;
+ u32 slave_port;
+
+ if (!phy)
+ return;
+
+ slave_port = cpsw_get_slave_port(priv, slave->slave_num);
+
+ if (phy->link) {
+ mac_control = priv->data.mac_control;
+
+ /* enable forwarding */
+ cpsw_ale_control_set(priv->ale, slave_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ if (phy->speed == 1000)
+ mac_control |= BIT(7); /* GIGABITEN */
+ if (phy->duplex)
+ mac_control |= BIT(0); /* FULLDUPLEXEN */
+ *link = true;
+ } else {
+ mac_control = 0;
+ /* disable forwarding */
+ cpsw_ale_control_set(priv->ale, slave_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+ }
+
+ if (mac_control != slave->mac_control) {
+ phy_print_status(phy);
+ __raw_writel(mac_control, &slave->sliver->mac_control);
+ }
+
+ slave->mac_control = mac_control;
+}
+
+static void cpsw_adjust_link(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ bool link = false;
+
+ for_each_slave(priv, _cpsw_adjust_link, priv, &link);
+
+ if (link) {
+ netif_carrier_on(ndev);
+ if (netif_running(ndev))
+ netif_wake_queue(ndev);
+ } else {
+ netif_carrier_off(ndev);
+ netif_stop_queue(ndev);
+ }
+}
+
+static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
+{
+ static char *leader = "........................................";
+
+ if (!val)
+ return 0;
+ else
+ return snprintf(buf, maxlen, "%s %s %10d\n", name,
+ leader + strlen(name), val);
+}
+
+static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ char name[32];
+ u32 slave_port;
+
+ sprintf(name, "slave-%d", slave->slave_num);
+
+ soft_reset(name, &slave->sliver->soft_reset);
+
+ /* setup priority mapping */
+ __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
+ __raw_writel(TX_PRIORITY_MAPPING, &slave->regs->tx_pri_map);
+
+ /* setup max packet size, and mac address */
+ __raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
+ cpsw_set_slave_mac(slave, priv);
+
+ slave->mac_control = 0; /* no link yet */
+
+ slave_port = cpsw_get_slave_port(priv, slave->slave_num);
+
+ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ 1 << slave_port, 0, ALE_MCAST_FWD_2);
+
+ slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
+ &cpsw_adjust_link, 0, slave->data->phy_if);
+ if (IS_ERR(slave->phy)) {
+ dev_err(priv->dev, "phy %s not found on slave %d\n",
+ slave->data->phy_id, slave->slave_num);
+ slave->phy = NULL;
+ } else {
+ dev_info(priv->dev, "phy found : id is : 0x%x\n",
+ slave->phy->phy_id);
+ phy_start(slave->phy);
+ }
+}
+
+static void cpsw_init_host_port(struct cpsw_priv *priv)
+{
+ /* soft reset the controller and initialize ale */
+ soft_reset("cpsw", &priv->regs->soft_reset);
+ cpsw_ale_start(priv->ale);
+
+ /* switch to vlan unaware mode */
+ cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0);
+
+ /* setup host port priority mapping */
+ __raw_writel(CPDMA_TX_PRIORITY_MAP,
+ &priv->host_port_regs->cpdma_tx_pri_map);
+ __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
+
+ cpsw_ale_control_set(priv->ale, priv->host_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, 0);
+ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ 1 << priv->host_port, 0, ALE_MCAST_FWD_2);
+}
+
+static int cpsw_ndo_open(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int i, ret;
+ u32 reg;
+
+ cpsw_intr_disable(priv);
+ netif_carrier_off(ndev);
+
+ ret = clk_enable(priv->clk);
+ if (ret < 0) {
+ dev_err(priv->dev, "unable to turn on device clock\n");
+ return ret;
+ }
+
+ reg = __raw_readl(&priv->regs->id_ver);
+
+ dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
+ CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
+ CPSW_RTL_VERSION(reg));
+
+ /* initialize host and slave ports */
+ cpsw_init_host_port(priv);
+ for_each_slave(priv, cpsw_slave_open, priv);
+
+ /* setup tx dma to fixed prio and zero offset */
+ cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
+ cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
+
+ /* disable priority elevation */
+ __raw_writel(0, &priv->regs->ptype);
+
+ /* enable statistics collection on all ports */
+ __raw_writel(0x7, &priv->regs->stat_port_en);
+
+ if (WARN_ON(!priv->data.rx_descs))
+ priv->data.rx_descs = 128;
+
+ for (i = 0; i < priv->data.rx_descs; i++) {
+ struct sk_buff *skb;
+
+ ret = -ENOMEM;
+ skb = netdev_alloc_skb_ip_align(priv->ndev,
+ priv->rx_packet_max);
+ if (!skb)
+ break;
+ ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
+ skb_tailroom(skb), GFP_KERNEL);
+ if (WARN_ON(ret < 0))
+ break;
+ }
+ /* continue even if we didn't manage to submit all receive descs */
+ cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
+
+ cpdma_ctlr_start(priv->dma);
+ cpsw_intr_enable(priv);
+ napi_enable(&priv->napi);
+ cpdma_ctlr_eoi(priv->dma);
+
+ return 0;
+}
+
+static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ if (!slave->phy)
+ return;
+ phy_stop(slave->phy);
+ phy_disconnect(slave->phy);
+ slave->phy = NULL;
+}
+
+static int cpsw_ndo_stop(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ cpsw_info(priv, ifdown, "shutting down cpsw device\n");
+ cpsw_intr_disable(priv);
+ cpdma_ctlr_int_ctrl(priv->dma, false);
+ cpdma_ctlr_stop(priv->dma);
+ netif_stop_queue(priv->ndev);
+ napi_disable(&priv->napi);
+ netif_carrier_off(priv->ndev);
+ cpsw_ale_stop(priv->ale);
+ for_each_slave(priv, cpsw_slave_stop, priv);
+ clk_disable(priv->clk);
+ return 0;
+}
+
+static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ndev->trans_start = jiffies;
+
+ if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
+ cpsw_err(priv, tx_err, "packet pad failed\n");
+ priv->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ ret = cpdma_chan_submit(priv->txch, skb, skb->data,
+ skb->len, GFP_KERNEL);
+ if (unlikely(ret != 0)) {
+ cpsw_err(priv, tx_err, "desc submit failed\n");
+ goto fail;
+ }
+
+ return NETDEV_TX_OK;
+fail:
+ priv->stats.tx_dropped++;
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+}
+
+static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
+{
+ /*
+ * The switch cannot operate in promiscuous mode without substantial
+ * headache. For promiscuous mode to work, we would need to put the
+ * ALE in bypass mode and route all traffic to the host port.
+ * Subsequently, the host will need to operate as a "bridge", learn,
+ * and flood as needed. For now, we simply complain here and
+ * do nothing about it :-)
+ */
+ if ((flags & IFF_PROMISC) && (ndev->flags & IFF_PROMISC))
+ dev_err(&ndev->dev, "promiscuity ignored!\n");
+
+ /*
+ * The switch cannot filter multicast traffic unless it is configured
+ * in "VLAN Aware" mode. Unfortunately, VLAN awareness requires a
+ * whole bunch of additional logic that this driver does not implement
+ * at present.
+ */
+ if ((flags & IFF_ALLMULTI) && !(ndev->flags & IFF_ALLMULTI))
+ dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
+}
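The comment above spells out why promiscuous mode is punted on. Purely as an illustration of what the first step would involve, using the ALE control API added later in this series (cpsw_ale.c below); this does not implement the bridging and learning duties the comment mentions:

static void example_ale_bypass(struct cpsw_priv *priv, bool enable)
{
	/* ALE_BYPASS makes the ALE forward every received frame to the
	 * host port instead of consulting the address table
	 */
	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, enable ? 1 : 0);
}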
+
+static void cpsw_ndo_tx_timeout(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
+ priv->stats.tx_errors++;
+ cpsw_intr_disable(priv);
+ cpdma_ctlr_int_ctrl(priv->dma, false);
+ cpdma_chan_stop(priv->txch);
+ cpdma_chan_start(priv->txch);
+ cpdma_ctlr_int_ctrl(priv->dma, true);
+ cpsw_intr_enable(priv);
+ cpdma_ctlr_eoi(priv->dma);
+}
+
+static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ return &priv->stats;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cpsw_ndo_poll_controller(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ cpsw_intr_disable(priv);
+ cpdma_ctlr_int_ctrl(priv->dma, false);
+ cpsw_interrupt(ndev->irq, priv);
+ cpdma_ctlr_int_ctrl(priv->dma, true);
+ cpsw_intr_enable(priv);
+ cpdma_ctlr_eoi(priv->dma);
+}
+#endif
+
+static const struct net_device_ops cpsw_netdev_ops = {
+ .ndo_open = cpsw_ndo_open,
+ .ndo_stop = cpsw_ndo_stop,
+ .ndo_start_xmit = cpsw_ndo_start_xmit,
+ .ndo_change_rx_flags = cpsw_ndo_change_rx_flags,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_tx_timeout = cpsw_ndo_tx_timeout,
+ .ndo_get_stats = cpsw_ndo_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cpsw_ndo_poll_controller,
+#endif
+};
+
+static void cpsw_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ strcpy(info->driver, "TI CPSW Driver v1.0");
+ strcpy(info->version, "1.0");
+ strcpy(info->bus_info, priv->pdev->name);
+}
+
+static u32 cpsw_get_msglevel(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ return priv->msg_enable;
+}
+
+static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ priv->msg_enable = value;
+}
+
+static const struct ethtool_ops cpsw_ethtool_ops = {
+ .get_drvinfo = cpsw_get_drvinfo,
+ .get_msglevel = cpsw_get_msglevel,
+ .set_msglevel = cpsw_set_msglevel,
+ .get_link = ethtool_op_get_link,
+};
+
+static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ void __iomem *regs = priv->regs;
+ int slave_num = slave->slave_num;
+ struct cpsw_slave_data *data = priv->data.slave_data + slave_num;
+
+ slave->data = data;
+ slave->regs = regs + data->slave_reg_ofs;
+ slave->sliver = regs + data->sliver_reg_ofs;
+}
+
+static int __devinit cpsw_probe(struct platform_device *pdev)
+{
+ struct cpsw_platform_data *data = pdev->dev.platform_data;
+ struct net_device *ndev;
+ struct cpsw_priv *priv;
+ struct cpdma_params dma_params;
+ struct cpsw_ale_params ale_params;
+ void __iomem *regs;
+ struct resource *res;
+ int ret = 0, i, k = 0;
+
+ if (!data) {
+ pr_err("platform data missing\n");
+ return -ENODEV;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct cpsw_priv));
+ if (!ndev) {
+ pr_err("error allocating net_device\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, ndev);
+ priv = netdev_priv(ndev);
+ spin_lock_init(&priv->lock);
+ priv->data = *data;
+ priv->pdev = pdev;
+ priv->ndev = ndev;
+ priv->dev = &ndev->dev;
+ priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+ priv->rx_packet_max = max(rx_packet_max, 128);
+
+ if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
+ memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
+ pr_info("Detected MACID = %pM", priv->mac_addr);
+ } else {
+ random_ether_addr(priv->mac_addr);
+ pr_info("Random MACID = %pM", priv->mac_addr);
+ }
+
+ memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+
+ priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves,
+ GFP_KERNEL);
+ if (!priv->slaves) {
+ ret = -EBUSY;
+ goto clean_ndev_ret;
+ }
+ for (i = 0; i < data->slaves; i++)
+ priv->slaves[i].slave_num = i;
+
+ priv->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(priv->dev, "failed to get device clock)\n");
+ ret = -EBUSY;
+ }
+
+ priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!priv->cpsw_res) {
+ dev_err(priv->dev, "error getting i/o resource\n");
+ ret = -ENOENT;
+ goto clean_clk_ret;
+ }
+
+ if (!request_mem_region(priv->cpsw_res->start,
+ resource_size(priv->cpsw_res), ndev->name)) {
+ dev_err(priv->dev, "failed request i/o region\n");
+ ret = -ENXIO;
+ goto clean_clk_ret;
+ }
+
+ regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
+ if (!regs) {
+ dev_err(priv->dev, "unable to map i/o region\n");
+ ret = -ENOMEM;
+ goto clean_cpsw_iores_ret;
+ }
+ priv->regs = regs;
+ priv->host_port = data->host_port_num;
+ priv->host_port_regs = regs + data->host_port_reg_ofs;
+
+ priv->cpsw_ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!priv->cpsw_ss_res) {
+ dev_err(priv->dev, "error getting i/o resource\n");
+ ret = -ENOENT;
+ goto clean_clk_ret;
+ }
+
+ if (!request_mem_region(priv->cpsw_ss_res->start,
+ resource_size(priv->cpsw_ss_res), ndev->name)) {
+ dev_err(priv->dev, "failed request i/o region\n");
+ ret = -ENXIO;
+ goto clean_clk_ret;
+ }
+
+ regs = ioremap(priv->cpsw_ss_res->start,
+ resource_size(priv->cpsw_ss_res));
+ if (!regs) {
+ dev_err(priv->dev, "unable to map i/o region\n");
+ ret = -ENOMEM;
+ goto clean_iomap_ret;
+ }
+ priv->ss_regs = regs;
+
+ for_each_slave(priv, cpsw_slave_init, priv);
+
+ memset(&dma_params, 0, sizeof(dma_params));
+ dma_params.dev = &pdev->dev;
+ dma_params.dmaregs = cpsw_dma_regs((u32)priv->regs,
+ data->cpdma_reg_ofs);
+ dma_params.rxthresh = cpsw_dma_rxthresh((u32)priv->regs,
+ data->cpdma_reg_ofs);
+ dma_params.rxfree = cpsw_dma_rxfree((u32)priv->regs,
+ data->cpdma_reg_ofs);
+ dma_params.txhdp = cpsw_dma_txhdp((u32)priv->regs,
+ data->cpdma_sram_ofs);
+ dma_params.rxhdp = cpsw_dma_rxhdp((u32)priv->regs,
+ data->cpdma_sram_ofs);
+ dma_params.txcp = cpsw_dma_txcp((u32)priv->regs,
+ data->cpdma_sram_ofs);
+ dma_params.rxcp = cpsw_dma_rxcp((u32)priv->regs,
+ data->cpdma_sram_ofs);
+
+ dma_params.num_chan = data->channels;
+ dma_params.has_soft_reset = true;
+ dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE;
+ dma_params.desc_mem_size = data->bd_ram_size;
+ dma_params.desc_align = 16;
+ dma_params.has_ext_regs = true;
+ dma_params.desc_mem_phys = data->no_bd_ram ? 0 :
+ (u32 __force)priv->cpsw_res->start + data->bd_ram_ofs;
+ dma_params.desc_hw_addr = data->hw_ram_addr ?
+ data->hw_ram_addr : dma_params.desc_mem_phys;
+
+ priv->dma = cpdma_ctlr_create(&dma_params);
+ if (!priv->dma) {
+ dev_err(priv->dev, "error initializing dma\n");
+ ret = -ENOMEM;
+ goto clean_iomap_ret;
+ }
+
+ priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
+ cpsw_tx_handler);
+ priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0),
+ cpsw_rx_handler);
+
+ if (WARN_ON(!priv->txch || !priv->rxch)) {
+ dev_err(priv->dev, "error initializing dma channels\n");
+ ret = -ENOMEM;
+ goto clean_dma_ret;
+ }
+
+ memset(&ale_params, 0, sizeof(ale_params));
+ ale_params.dev = &ndev->dev;
+ ale_params.ale_regs = (void *)((u32)priv->regs) +
+ ((u32)data->ale_reg_ofs);
+ ale_params.ale_ageout = ale_ageout;
+ ale_params.ale_entries = data->ale_entries;
+ ale_params.ale_ports = data->slaves;
+
+ priv->ale = cpsw_ale_create(&ale_params);
+ if (!priv->ale) {
+ dev_err(priv->dev, "error initializing ale engine\n");
+ ret = -ENODEV;
+ goto clean_dma_ret;
+ }
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ dev_err(priv->dev, "error getting irq resource\n");
+ ret = -ENOENT;
+ goto clean_ale_ret;
+ }
+
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
+ for (i = res->start; i <= res->end; i++) {
+ if (request_irq(i, cpsw_interrupt, IRQF_DISABLED,
+ dev_name(&pdev->dev), priv)) {
+ dev_err(priv->dev, "error attaching irq\n");
+ goto clean_ale_ret;
+ }
+ priv->irqs_table[k] = i;
+ priv->num_irqs = k;
+ }
+ k++;
+ }
+
+ ndev->flags |= IFF_ALLMULTI; /* see cpsw_ndo_change_rx_flags() */
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+ netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
+
+ /* register the network device */
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(priv->dev, "error registering net device\n");
+ ret = -ENODEV;
+ goto clean_irq_ret;
+ }
+
+ cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
+ priv->cpsw_res->start, ndev->irq);
+
+ return 0;
+
+clean_irq_ret:
+ free_irq(ndev->irq, priv);
+clean_ale_ret:
+ cpsw_ale_destroy(priv->ale);
+clean_dma_ret:
+ cpdma_chan_destroy(priv->txch);
+ cpdma_chan_destroy(priv->rxch);
+ cpdma_ctlr_destroy(priv->dma);
+clean_iomap_ret:
+ iounmap(priv->regs);
+clean_cpsw_ss_iores_ret:
+ release_mem_region(priv->cpsw_ss_res->start,
+ resource_size(priv->cpsw_ss_res));
+clean_cpsw_iores_ret:
+ release_mem_region(priv->cpsw_res->start,
+ resource_size(priv->cpsw_res));
+clean_clk_ret:
+ clk_put(priv->clk);
+ kfree(priv->slaves);
+clean_ndev_ret:
+ free_netdev(ndev);
+ return ret;
+}
+
+static int __devexit cpsw_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ pr_info("removing device");
+ platform_set_drvdata(pdev, NULL);
+
+ free_irq(ndev->irq, priv);
+ cpsw_ale_destroy(priv->ale);
+ cpdma_chan_destroy(priv->txch);
+ cpdma_chan_destroy(priv->rxch);
+ cpdma_ctlr_destroy(priv->dma);
+ iounmap(priv->regs);
+ release_mem_region(priv->cpsw_res->start,
+ resource_size(priv->cpsw_res));
+ release_mem_region(priv->cpsw_ss_res->start,
+ resource_size(priv->cpsw_ss_res));
+ clk_put(priv->clk);
+ kfree(priv->slaves);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static int cpsw_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ if (netif_running(ndev))
+ cpsw_ndo_stop(ndev);
+ return 0;
+}
+
+static int cpsw_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ if (netif_running(ndev))
+ cpsw_ndo_open(ndev);
+ return 0;
+}
+
+static const struct dev_pm_ops cpsw_pm_ops = {
+ .suspend = cpsw_suspend,
+ .resume = cpsw_resume,
+};
+
+static struct platform_driver cpsw_driver = {
+ .driver = {
+ .name = "cpsw",
+ .owner = THIS_MODULE,
+ .pm = &cpsw_pm_ops,
+ },
+ .probe = cpsw_probe,
+ .remove = __devexit_p(cpsw_remove),
+};
+
+static int __init cpsw_init(void)
+{
+ return platform_driver_register(&cpsw_driver);
+}
+late_initcall(cpsw_init);
+
+static void __exit cpsw_exit(void)
+{
+ platform_driver_unregister(&cpsw_driver);
+}
+module_exit(cpsw_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
+MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
+MODULE_DESCRIPTION("TI CPSW Ethernet driver");
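For reference, a sketch of the platform data a board file would have to supply for cpsw_probe() to succeed; only fields that the probe and init code above actually dereferences are shown, the numeric values are illustrative, and the header path is an assumption rather than something this hunk defines:

#include <linux/platform_data/cpsw.h>	/* assumed location of struct cpsw_platform_data */

static struct cpsw_slave_data board_cpsw_slave_data[] = {
	{ .slave_reg_ofs = 0x50, .sliver_reg_ofs = 0x700 },	/* example offsets */
	{ .slave_reg_ofs = 0x90, .sliver_reg_ofs = 0x740 },
};

static struct cpsw_platform_data board_cpsw_data = {
	.slaves		= ARRAY_SIZE(board_cpsw_slave_data),
	.slave_data	= board_cpsw_slave_data,
	.channels	= 8,		/* example value */
	.host_port_num	= 0,
	.rx_descs	= 64,
	.ale_entries	= 1024,
	.bd_ram_size	= 0x2000,
	/* cpdma_reg_ofs, cpdma_sram_ofs, ale_reg_ofs, host_port_reg_ofs,
	 * bd_ram_ofs, slave mac_addr, etc. omitted -- all SoC specific
	 */
};

The probe routine additionally expects two IORESOURCE_MEM entries (switch core and subsystem register windows) and one or more IRQ resources on the "cpsw" platform device.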
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
new file mode 100644
index 000000000000..ca0d48a7e508
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -0,0 +1,641 @@
+/*
+ * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine
+ *
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+
+#include "cpsw_ale.h"
+
+#define BITMASK(bits) (BIT(bits) - 1)
+#define ALE_ENTRY_BITS 68
+#define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
+
+#define ALE_VERSION_MAJOR(rev) ((rev >> 8) & 0xff)
+#define ALE_VERSION_MINOR(rev) (rev & 0xff)
+
+/* ALE Registers */
+#define ALE_IDVER 0x00
+#define ALE_CONTROL 0x08
+#define ALE_PRESCALE 0x10
+#define ALE_UNKNOWNVLAN 0x18
+#define ALE_TABLE_CONTROL 0x20
+#define ALE_TABLE 0x34
+#define ALE_PORTCTL 0x40
+
+#define ALE_TABLE_WRITE BIT(31)
+
+#define ALE_TYPE_FREE 0
+#define ALE_TYPE_ADDR 1
+#define ALE_TYPE_VLAN 2
+#define ALE_TYPE_VLAN_ADDR 3
+
+#define ALE_UCAST_PERSISTANT 0
+#define ALE_UCAST_UNTOUCHED 1
+#define ALE_UCAST_OUI 2
+#define ALE_UCAST_TOUCHED 3
+
+static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
+{
+ int idx;
+
+ idx = start / 32;
+ start -= idx * 32;
+ idx = 2 - idx; /* flip */
+ return (ale_entry[idx] >> start) & BITMASK(bits);
+}
+
+static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
+ u32 value)
+{
+ int idx;
+
+ value &= BITMASK(bits);
+ idx = start / 32;
+ start -= idx * 32;
+ idx = 2 - idx; /* flip */
+ ale_entry[idx] &= ~(BITMASK(bits) << start);
+ ale_entry[idx] |= (value << start);
+}
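The flip in the two helpers above means the three-word ale_entry[] array is handled most-significant word first: higher bit offsets land in lower array indices. A small self-contained trace of the arithmetic (host-side sketch, not driver code):

#include <stdio.h>
#include <stdint.h>

#define BITMASK(bits) ((1u << (bits)) - 1)

/* same arithmetic as cpsw_ale_get_field() above */
static int ale_get_field(uint32_t *ale_entry, uint32_t start, uint32_t bits)
{
	int idx = start / 32;

	start -= idx * 32;
	idx = 2 - idx;		/* entry kept most-significant word first */
	return (ale_entry[idx] >> start) & BITMASK(bits);
}

int main(void)
{
	uint32_t entry[3] = { 0x0, 0x10ab0000, 0x0 };

	/* vlan_id: start 48, width 12 -> word index 1, shift 16 */
	printf("vlan_id = 0x%x\n", ale_get_field(entry, 48, 12)); /* 0xab */
	return 0;
}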
+
+#define DEFINE_ALE_FIELD(name, start, bits) \
+static inline int cpsw_ale_get_##name(u32 *ale_entry) \
+{ \
+ return cpsw_ale_get_field(ale_entry, start, bits); \
+} \
+static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
+{ \
+ cpsw_ale_set_field(ale_entry, start, bits, value); \
+}
+
+DEFINE_ALE_FIELD(entry_type, 60, 2)
+DEFINE_ALE_FIELD(vlan_id, 48, 12)
+DEFINE_ALE_FIELD(mcast_state, 62, 2)
+DEFINE_ALE_FIELD(port_mask, 66, 3)
+DEFINE_ALE_FIELD(super, 65, 1)
+DEFINE_ALE_FIELD(ucast_type, 62, 2)
+DEFINE_ALE_FIELD(port_num, 66, 2)
+DEFINE_ALE_FIELD(blocked, 65, 1)
+DEFINE_ALE_FIELD(secure, 64, 1)
+DEFINE_ALE_FIELD(vlan_untag_force, 24, 3)
+DEFINE_ALE_FIELD(vlan_reg_mcast, 16, 3)
+DEFINE_ALE_FIELD(vlan_unreg_mcast, 8, 3)
+DEFINE_ALE_FIELD(vlan_member_list, 0, 3)
+DEFINE_ALE_FIELD(mcast, 40, 1)
+
+/* The MAC address field in the ALE entry cannot be macroized as above */
+static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
+}
+
+static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
+}
+
+static int cpsw_ale_read(struct cpsw_ale *ale, int idx, u32 *ale_entry)
+{
+ int i;
+
+ WARN_ON(idx > ale->params.ale_entries);
+
+ __raw_writel(idx, ale->params.ale_regs + ALE_TABLE_CONTROL);
+
+ for (i = 0; i < ALE_ENTRY_WORDS; i++)
+ ale_entry[i] = __raw_readl(ale->params.ale_regs +
+ ALE_TABLE + 4 * i);
+
+ return idx;
+}
+
+static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
+{
+ int i;
+
+ WARN_ON(idx > ale->params.ale_entries);
+
+ for (i = 0; i < ALE_ENTRY_WORDS; i++)
+ __raw_writel(ale_entry[i], ale->params.ale_regs +
+ ALE_TABLE + 4 * i);
+
+ __raw_writel(idx | ALE_TABLE_WRITE, ale->params.ale_regs +
+ ALE_TABLE_CONTROL);
+
+ return idx;
+}
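A hypothetical debug helper (not part of this patch) showing how the read path and the generated accessors combine to walk the table:

static void example_ale_dump(struct cpsw_ale *ale)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	u8 addr[6];
	int idx, type;

	for (idx = 0; idx < ale->params.ale_entries; idx++) {
		cpsw_ale_read(ale, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		cpsw_ale_get_addr(ale_entry, addr);
		dev_dbg(ale->params.dev, "ale[%d]: type %d addr %pM\n",
			idx, type, addr);
	}
}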
+
+static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ u8 entry_addr[6];
+
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
+ continue;
+ cpsw_ale_get_addr(ale_entry, entry_addr);
+ if (memcmp(entry_addr, addr, 6) == 0)
+ return idx;
+ }
+ return -ENOENT;
+}
+
+static int cpsw_ale_match_free(struct cpsw_ale *ale)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type == ALE_TYPE_FREE)
+ return idx;
+ }
+ return -ENOENT;
+}
+
+static int cpsw_ale_find_ageable(struct cpsw_ale *ale)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
+ continue;
+ if (cpsw_ale_get_mcast(ale_entry))
+ continue;
+ type = cpsw_ale_get_ucast_type(ale_entry);
+ if (type != ALE_UCAST_PERSISTANT &&
+ type != ALE_UCAST_OUI)
+ return idx;
+ }
+ return -ENOENT;
+}
+
+static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+ int port_mask)
+{
+ int mask;
+
+ mask = cpsw_ale_get_port_mask(ale_entry);
+ if ((mask & port_mask) == 0)
+ return; /* ports don't intersect, not interested */
+ mask &= ~port_mask;
+
+ /* free if only remaining port is host port */
+ if (mask == BIT(ale->params.ale_ports))
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ else
+ cpsw_ale_set_port_mask(ale_entry, mask);
+}
+
+static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
+ int port_mask)
+{
+ int port;
+
+ port = cpsw_ale_get_port_num(ale_entry);
+ if ((BIT(port) & port_mask) == 0)
+ return; /* ports don't intersect, not interested */
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+}
+
+int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int ret, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ ret = cpsw_ale_get_entry_type(ale_entry);
+ if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
+ continue;
+
+ if (cpsw_ale_get_mcast(ale_entry))
+ cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
+ else
+ cpsw_ale_flush_ucast(ale, ale_entry, port_mask);
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ }
+ return 0;
+}
+
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ cpsw_ale_set_addr(ale_entry, addr);
+ cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
+ cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
+ cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
+ cpsw_ale_set_port_num(ale_entry, port);
+
+ idx = cpsw_ale_match_addr(ale, addr);
+ if (idx < 0)
+ idx = cpsw_ale_match_free(ale);
+ if (idx < 0)
+ idx = cpsw_ale_find_ageable(ale);
+ if (idx < 0)
+ return -ENOMEM;
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
+
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ idx = cpsw_ale_match_addr(ale, addr);
+ if (idx < 0)
+ return -ENOENT;
+
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
+
+int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+ int super, int mcast_state)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx, mask;
+
+ idx = cpsw_ale_match_addr(ale, addr);
+ if (idx >= 0)
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ cpsw_ale_set_addr(ale_entry, addr);
+ cpsw_ale_set_super(ale_entry, super);
+ cpsw_ale_set_mcast_state(ale_entry, mcast_state);
+
+ mask = cpsw_ale_get_port_mask(ale_entry);
+ port_mask |= mask;
+ cpsw_ale_set_port_mask(ale_entry, port_mask);
+
+ if (idx < 0)
+ idx = cpsw_ale_match_free(ale);
+ if (idx < 0)
+ idx = cpsw_ale_find_ageable(ale);
+ if (idx < 0)
+ return -ENOMEM;
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
+
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ idx = cpsw_ale_match_addr(ale, addr);
+ if (idx < 0)
+ return -EINVAL;
+
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ if (port_mask)
+ cpsw_ale_set_port_mask(ale_entry, port_mask);
+ else
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
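The four add/del helpers above are the whole table-management API. As an illustration of how a caller could use them -- the cpsw driver in this patch does not wire up ndo_set_rx_mode yet, so this is a sketch rather than driver behaviour:

static void example_set_rx_mode(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct netdev_hw_addr *ha;

	/* forward each subscribed multicast address to the host port */
	netdev_for_each_mc_addr(ha, ndev)
		cpsw_ale_add_mcast(priv->ale, ha->addr,
				   1 << priv->host_port, 0, ALE_MCAST_FWD_2);
}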
+
+struct ale_control_info {
+ const char *name;
+ int offset, port_offset;
+ int shift, port_shift;
+ int bits;
+};
+
+static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
+ [ALE_ENABLE] = {
+ .name = "enable",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 31,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_CLEAR] = {
+ .name = "clear",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 30,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_AGEOUT] = {
+ .name = "ageout",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 29,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_VLAN_NOLEARN] = {
+ .name = "vlan_nolearn",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 7,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_NO_PORT_VLAN] = {
+ .name = "no_port_vlan",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 6,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_OUI_DENY] = {
+ .name = "oui_deny",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 5,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_BYPASS] = {
+ .name = "bypass",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 4,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_RATE_LIMIT_TX] = {
+ .name = "rate_limit_tx",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 3,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_VLAN_AWARE] = {
+ .name = "vlan_aware",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 2,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_AUTH_ENABLE] = {
+ .name = "auth_enable",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 1,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_RATE_LIMIT] = {
+ .name = "rate_limit",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 0,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_PORT_STATE] = {
+ .name = "port_state",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 0,
+ .port_shift = 0,
+ .bits = 2,
+ },
+ [ALE_PORT_DROP_UNTAGGED] = {
+ .name = "drop_untagged",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 2,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_PORT_DROP_UNKNOWN_VLAN] = {
+ .name = "drop_unknown",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 3,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_PORT_NOLEARN] = {
+ .name = "nolearn",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 4,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_PORT_MCAST_LIMIT] = {
+ .name = "mcast_limit",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 16,
+ .port_shift = 0,
+ .bits = 8,
+ },
+ [ALE_PORT_BCAST_LIMIT] = {
+ .name = "bcast_limit",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 24,
+ .port_shift = 0,
+ .bits = 8,
+ },
+ [ALE_PORT_UNKNOWN_VLAN_MEMBER] = {
+ .name = "unknown_vlan_member",
+ .offset = ALE_UNKNOWNVLAN,
+ .port_offset = 0,
+ .shift = 0,
+ .port_shift = 0,
+ .bits = 6,
+ },
+ [ALE_PORT_UNKNOWN_MCAST_FLOOD] = {
+ .name = "unknown_mcast_flood",
+ .offset = ALE_UNKNOWNVLAN,
+ .port_offset = 0,
+ .shift = 8,
+ .port_shift = 0,
+ .bits = 6,
+ },
+ [ALE_PORT_UNKNOWN_REG_MCAST_FLOOD] = {
+ .name = "unknown_reg_flood",
+ .offset = ALE_UNKNOWNVLAN,
+ .port_offset = 0,
+ .shift = 16,
+ .port_shift = 0,
+ .bits = 6,
+ },
+ [ALE_PORT_UNTAGGED_EGRESS] = {
+ .name = "untagged_egress",
+ .offset = ALE_UNKNOWNVLAN,
+ .port_offset = 0,
+ .shift = 24,
+ .port_shift = 0,
+ .bits = 6,
+ },
+};
+
+int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
+ int value)
+{
+ const struct ale_control_info *info;
+ int offset, shift;
+ u32 tmp, mask;
+
+ if (control < 0 || control >= ARRAY_SIZE(ale_controls))
+ return -EINVAL;
+
+ info = &ale_controls[control];
+ if (info->port_offset == 0 && info->port_shift == 0)
+ port = 0; /* global control; port is a don't care */
+
+ if (port < 0 || port > ale->params.ale_ports)
+ return -EINVAL;
+
+ mask = BITMASK(info->bits);
+ if (value & ~mask)
+ return -EINVAL;
+
+ offset = info->offset + (port * info->port_offset);
+ shift = info->shift + (port * info->port_shift);
+
+ tmp = __raw_readl(ale->params.ale_regs + offset);
+ tmp = (tmp & ~(mask << shift)) | (value << shift);
+ __raw_writel(tmp, ale->params.ale_regs + offset);
+
+ return 0;
+}
+
+int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
+{
+ const struct ale_control_info *info;
+ int offset, shift;
+ u32 tmp;
+
+ if (control < 0 || control >= ARRAY_SIZE(ale_controls))
+ return -EINVAL;
+
+ info = &ale_controls[control];
+ if (info->port_offset == 0 && info->port_shift == 0)
+ port = 0; /* global control; port is a don't care */
+
+ if (port < 0 || port > ale->params.ale_ports)
+ return -EINVAL;
+
+ offset = info->offset + (port * info->port_offset);
+ shift = info->shift + (port * info->port_shift);
+
+ tmp = __raw_readl(ale->params.ale_regs + offset) >> shift;
+ return tmp & BITMASK(info->bits);
+}
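Per-port controls are located as offset = info->offset + port * info->port_offset and shift = info->shift + port * info->port_shift; for ALE_PORT_STATE on port 1 that works out to ALE_PORTCTL + 4 = 0x44, bits 1:0. A short usage sketch built only from the symbols above:

static int example_set_port_forwarding(struct cpsw_ale *ale, int port)
{
	/* writes ALE_PORT_STATE_FORWARD (0x3) into the 2-bit field at
	 * ALE_PORTCTL + port * 4, then reads it back
	 */
	cpsw_ale_control_set(ale, port, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
	return cpsw_ale_control_get(ale, port, ALE_PORT_STATE);
}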
+
+static void cpsw_ale_timer(unsigned long arg)
+{
+ struct cpsw_ale *ale = (struct cpsw_ale *)arg;
+
+ cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+
+ if (ale->ageout) {
+ ale->timer.expires = jiffies + ale->ageout;
+ add_timer(&ale->timer);
+ }
+}
+
+int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout)
+{
+ del_timer_sync(&ale->timer);
+ ale->ageout = ageout * HZ;
+ if (ale->ageout) {
+ ale->timer.expires = jiffies + ale->ageout;
+ add_timer(&ale->timer);
+ }
+ return 0;
+}
+
+void cpsw_ale_start(struct cpsw_ale *ale)
+{
+ u32 rev;
+
+ rev = __raw_readl(ale->params.ale_regs + ALE_IDVER);
+ dev_dbg(ale->params.dev, "initialized cpsw ale revision %d.%d\n",
+ ALE_VERSION_MAJOR(rev), ALE_VERSION_MINOR(rev));
+ cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
+ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
+
+ init_timer(&ale->timer);
+ ale->timer.data = (unsigned long)ale;
+ ale->timer.function = cpsw_ale_timer;
+ if (ale->ageout) {
+ ale->timer.expires = jiffies + ale->ageout;
+ add_timer(&ale->timer);
+ }
+}
+
+void cpsw_ale_stop(struct cpsw_ale *ale)
+{
+ del_timer_sync(&ale->timer);
+}
+
+struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
+{
+ struct cpsw_ale *ale;
+
+ ale = kzalloc(sizeof(*ale), GFP_KERNEL);
+ if (!ale)
+ return NULL;
+
+ ale->params = *params;
+ ale->ageout = ale->params.ale_ageout * HZ;
+
+ return ale;
+}
+
+int cpsw_ale_destroy(struct cpsw_ale *ale)
+{
+ if (!ale)
+ return -EINVAL;
+ cpsw_ale_stop(ale);
+ cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
+ kfree(ale);
+ return 0;
+}
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
new file mode 100644
index 000000000000..a95b37beb02d
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -0,0 +1,93 @@
+/*
+ * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine APIs
+ *
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __TI_CPSW_ALE_H__
+#define __TI_CPSW_ALE_H__
+
+struct cpsw_ale_params {
+ struct device *dev;
+ void __iomem *ale_regs;
+ unsigned long ale_ageout; /* in secs */
+ unsigned long ale_entries;
+ unsigned long ale_ports;
+};
+
+struct cpsw_ale {
+ struct cpsw_ale_params params;
+ struct timer_list timer;
+ unsigned long ageout;
+};
+
+enum cpsw_ale_control {
+ /* global */
+ ALE_ENABLE,
+ ALE_CLEAR,
+ ALE_AGEOUT,
+ ALE_VLAN_NOLEARN,
+ ALE_NO_PORT_VLAN,
+ ALE_OUI_DENY,
+ ALE_BYPASS,
+ ALE_RATE_LIMIT_TX,
+ ALE_VLAN_AWARE,
+ ALE_AUTH_ENABLE,
+ ALE_RATE_LIMIT,
+ /* port controls */
+ ALE_PORT_STATE,
+ ALE_PORT_DROP_UNTAGGED,
+ ALE_PORT_DROP_UNKNOWN_VLAN,
+ ALE_PORT_NOLEARN,
+ ALE_PORT_UNKNOWN_VLAN_MEMBER,
+ ALE_PORT_UNKNOWN_MCAST_FLOOD,
+ ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
+ ALE_PORT_UNTAGGED_EGRESS,
+ ALE_PORT_BCAST_LIMIT,
+ ALE_PORT_MCAST_LIMIT,
+ ALE_NUM_CONTROLS,
+};
+
+enum cpsw_ale_port_state {
+ ALE_PORT_STATE_DISABLE = 0x00,
+ ALE_PORT_STATE_BLOCK = 0x01,
+ ALE_PORT_STATE_LEARN = 0x02,
+ ALE_PORT_STATE_FORWARD = 0x03,
+};
+
+/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
+#define ALE_SECURE 1
+#define ALE_BLOCKED 2
+
+#define ALE_MCAST_FWD 0
+#define ALE_MCAST_BLOCK_LEARN_FWD 1
+#define ALE_MCAST_FWD_LEARN 2
+#define ALE_MCAST_FWD_2 3
+
+struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params);
+int cpsw_ale_destroy(struct cpsw_ale *ale);
+
+void cpsw_ale_start(struct cpsw_ale *ale);
+void cpsw_ale_stop(struct cpsw_ale *ale);
+
+int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
+int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags);
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port);
+int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+ int super, int mcast_state);
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask);
+
+int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
+int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
+ int control, int value);
+
+#endif
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index c97d2f590855..34558766cbf0 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -822,7 +822,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
dma_reg_write(ctlr, chan->int_clear, chan->mask);
/* trigger teardown */
- dma_reg_write(ctlr, chan->td, chan->chan_num);
+ dma_reg_write(ctlr, chan->td, chan_linear(chan));
/* wait for teardown complete */
timeout = jiffies + HZ/10; /* 100 msec */
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4b2f54565f64..174a3348f676 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -992,10 +992,9 @@ static irqreturn_t emac_irq(int irq, void *dev_id)
static struct sk_buff *emac_rx_alloc(struct emac_priv *priv)
{
- struct sk_buff *skb = dev_alloc_skb(priv->rx_buf_size);
+ struct sk_buff *skb = netdev_alloc_skb(priv->ndev, priv->rx_buf_size);
if (WARN_ON(!skb))
return NULL;
- skb->dev = priv->ndev;
skb_reserve(skb, NET_IP_ALIGN);
return skb;
}
@@ -1257,15 +1256,15 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
struct sockaddr *sa = addr;
if (!is_valid_ether_addr(sa->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
/* Store mac addr in priv and rx channel and set it in EMAC hw */
memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
+ ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
/* MAC address is configured only after the interface is enabled. */
if (netif_running(ndev)) {
- memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
}
@@ -1792,7 +1791,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
ndev = alloc_etherdev(sizeof(struct emac_priv));
if (!ndev) {
- dev_err(&pdev->dev, "error allocating net_device\n");
rc = -ENOMEM;
goto free_clk;
}
@@ -1900,7 +1898,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
if (!is_valid_ether_addr(priv->mac_addr)) {
/* Use random MAC if none passed */
- random_ether_addr(priv->mac_addr);
+ eth_hw_addr_random(ndev);
+ memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len);
dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
priv->mac_addr);
}
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index af8b8fc39eb2..2757c7d6e633 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -53,7 +53,7 @@ struct davinci_mdio_regs {
u32 control;
#define CONTROL_IDLE BIT(31)
#define CONTROL_ENABLE BIT(30)
-#define CONTROL_MAX_DIV (0xff)
+#define CONTROL_MAX_DIV (0xffff)
u32 alive;
u32 link;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 9c0dd6b8d6c9..817ad3bc4957 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -486,7 +486,6 @@ static int __devinit tlan_probe1(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(struct tlan_priv));
if (dev == NULL) {
- pr_err("Could not allocate memory for device\n");
rc = -ENOMEM;
goto err_out_regions;
}
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index d9951afb9269..261356c2dc99 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -419,7 +419,7 @@ static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
#endif
/* Avoid "false sharing" with last cache line. */
- /* ISSUE: This is already done by "dev_alloc_skb()". */
+ /* ISSUE: This is already done by "netdev_alloc_skb()". */
unsigned int len =
(((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());
@@ -433,7 +433,7 @@ static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
struct sk_buff **skb_ptr;
/* Request 96 extra bytes for alignment purposes. */
- skb = dev_alloc_skb(len + padding);
+ skb = netdev_alloc_skb(info->napi->dev, len + padding);
if (skb == NULL)
return false;
@@ -2186,10 +2186,11 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p)
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
/* ISSUE: Note that "dev_addr" is now a pointer. */
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0;
}
@@ -2254,7 +2255,7 @@ static int tile_net_get_mac(struct net_device *dev)
* can't get its MAC address, we are most likely running
* the simulator, so let's generate a random MAC address.
*/
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
}
return 0;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index fd4ed7f8cfa1..5c14f82c4954 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1621,10 +1621,9 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
kfree(target->hwinfo);
target->hwinfo = kzalloc(be16_to_cpu(scan_info->size),
GFP_KERNEL);
- if (!target->hwinfo) {
- pr_info("%s: kzalloc failed\n", __func__);
+ if (!target->hwinfo)
continue;
- }
+
/* copy hw scan info */
memcpy(target->hwinfo, scan_info, scan_info->size);
target->essid_len = strnlen(scan_info->essid,
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 71b785cd7563..651a70c55e6e 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -453,7 +453,7 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
dma_addr_t *dma_handle)
{
struct sk_buff *skb;
- skb = dev_alloc_skb(RX_BUF_SIZE);
+ skb = netdev_alloc_skb(dev, RX_BUF_SIZE);
if (!skb)
return NULL;
*dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
@@ -808,10 +808,9 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev,
/* dev zeroed in alloc_etherdev */
dev = alloc_etherdev(sizeof(*lp));
- if (dev == NULL) {
- dev_err(&pdev->dev, "unable to alloc new ethernet\n");
+ if (dev == NULL)
return -ENOMEM;
- }
+
SET_NETDEV_DEV(dev, &pdev->dev);
lp = netdev_priv(dev);
lp->dev = dev;
@@ -850,7 +849,7 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev,
/* Retrieve the ethernet address. */
if (tc35815_init_dev_addr(dev)) {
dev_warn(&pdev->dev, "not valid ether addr\n");
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
}
rc = register_netdev(dev);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 164fb775d7b3..840e0e9031f5 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1148,7 +1148,7 @@ static int tsi108_set_mac(struct net_device *dev, void *addr)
int i;
if (!is_valid_ether_addr(addr))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
for (i = 0; i < 6; i++)
/* +2 is for the offset of the HW addr type */
@@ -1582,10 +1582,8 @@ tsi108_init_one(struct platform_device *pdev)
/* Create an ethernet device instance */
dev = alloc_etherdev(sizeof(struct tsi108_prv_data));
- if (!dev) {
- printk("tsi108_eth: Could not allocate a device structure\n");
+ if (!dev)
return -ENOMEM;
- }
printk("tsi108_eth%d: probe...\n", pdev->id);
data = netdev_priv(dev);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 10b18eb63d25..39b8cf3dafcd 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -927,7 +927,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(struct rhine_private));
if (!dev) {
rc = -ENOMEM;
- dev_err(&pdev->dev, "alloc_etherdev failed\n");
goto err_out;
}
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -984,7 +983,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
if (!is_valid_ether_addr(dev->dev_addr)) {
/* Report it and use a random ethernet address instead */
netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
netdev_info(dev, "Using random MAC address: %pM\n",
dev->dev_addr);
}
@@ -1156,7 +1155,6 @@ static void alloc_rbufs(struct net_device *dev)
rp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
- skb->dev = dev; /* Mark as being used by this device. */
rp->rx_skbuff_dma[i] =
pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
@@ -1941,7 +1939,6 @@ static int rhine_rx(struct net_device *dev, int limit)
rp->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
- skb->dev = dev; /* Mark as being used by this device. */
rp->rx_skbuff_dma[entry] =
pci_map_single(rp->pdev, skb->data,
rp->rx_buf_sz,
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index cb35b14b73bb..8a5d7c100a5e 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -1509,7 +1509,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
struct rx_desc *rd = &(vptr->rx.ring[idx]);
struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
- rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
+ rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx.buf_sz + 64);
if (rd_info->skb == NULL)
return -ENOMEM;
@@ -2730,10 +2730,8 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
}
dev = alloc_etherdev(sizeof(struct velocity_info));
- if (!dev) {
- dev_err(&pdev->dev, "allocate net device failed.\n");
+ if (!dev)
goto out;
- }
/* Chain it all together */
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index d5a826063a82..5778a4ae1164 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -25,6 +25,14 @@ config XILINX_EMACLITE
---help---
This driver supports the 10/100 Ethernet Lite from Xilinx.
+config XILINX_AXI_EMAC
+ tristate "Xilinx 10/100/1000 AXI Ethernet support"
+ depends on (PPC32 || MICROBLAZE)
+ select PHYLIB
+ ---help---
+ This driver supports the 10/100/1000 Ethernet from Xilinx for the
+ AXI bus interface used in Xilinx Virtex FPGAs.
+
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
depends on (PPC || MICROBLAZE)
diff --git a/drivers/net/ethernet/xilinx/Makefile b/drivers/net/ethernet/xilinx/Makefile
index 5feac734ea45..214205e975e3 100644
--- a/drivers/net/ethernet/xilinx/Makefile
+++ b/drivers/net/ethernet/xilinx/Makefile
@@ -5,3 +5,5 @@
ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
+xilinx_emac-objs := xilinx_axienet_main.o xilinx_axienet_mdio.o
+obj-$(CONFIG_XILINX_AXI_EMAC) += xilinx_emac.o
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index f21addb1db95..d21591a2c593 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -327,7 +327,9 @@ static int temac_set_mac_address(struct net_device *ndev, void *address)
memcpy(ndev->dev_addr, address, ETH_ALEN);
if (!is_valid_ether_addr(ndev->dev_addr))
- random_ether_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
+ else
+ ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
/* set up unicast MAC address filter set its mac address */
mutex_lock(&lp->indirect_mutex);
@@ -1011,10 +1013,9 @@ static int __devinit temac_of_probe(struct platform_device *op)
/* Init network device structure */
ndev = alloc_etherdev(sizeof(*lp));
- if (!ndev) {
- dev_err(&op->dev, "could not allocate device.\n");
+ if (!ndev)
return -ENOMEM;
- }
+
ether_setup(ndev);
dev_set_drvdata(&op->dev, ndev);
SET_NETDEV_DEV(ndev, &op->dev);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
new file mode 100644
index 000000000000..cc83af083fd7
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -0,0 +1,508 @@
+/*
+ * Definitions for Xilinx Axi Ethernet device driver.
+ *
+ * Copyright (c) 2009 Secret Lab Technologies, Ltd.
+ * Copyright (c) 2010 Xilinx, Inc. All rights reserved.
+ * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
+ * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
+ */
+
+#ifndef XILINX_AXIENET_H
+#define XILINX_AXIENET_H
+
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+/* Packet size info */
+#define XAE_HDR_SIZE 14 /* Size of Ethernet header */
+#define XAE_HDR_VLAN_SIZE 18 /* Size of an Ethernet hdr + VLAN */
+#define XAE_TRL_SIZE 4 /* Size of Ethernet trailer (FCS) */
+#define XAE_MTU 1500 /* Max MTU of an Ethernet frame */
+#define XAE_JUMBO_MTU 9000 /* Max MTU of a jumbo Eth. frame */
+
+#define XAE_MAX_FRAME_SIZE (XAE_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
+#define XAE_MAX_VLAN_FRAME_SIZE (XAE_MTU + XAE_HDR_VLAN_SIZE + XAE_TRL_SIZE)
+#define XAE_MAX_JUMBO_FRAME_SIZE (XAE_JUMBO_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
+
+/* Configuration options */
+
+/* Accept all incoming packets. Default: disabled (cleared) */
+#define XAE_OPTION_PROMISC (1 << 0)
+
+/* Jumbo frame support for Tx & Rx. Default: disabled (cleared) */
+#define XAE_OPTION_JUMBO (1 << 1)
+
+/* VLAN Rx & Tx frame support. Default: disabled (cleared) */
+#define XAE_OPTION_VLAN (1 << 2)
+
+/* Enable recognition of flow control frames on Rx. Default: enabled (set) */
+#define XAE_OPTION_FLOW_CONTROL (1 << 4)
+
+/* Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not
+ * stripped. Default: disabled (cleared) */
+#define XAE_OPTION_FCS_STRIP (1 << 5)
+
+/* Generate FCS field and add PAD automatically for outgoing frames.
+ * Default: enabled (set) */
+#define XAE_OPTION_FCS_INSERT (1 << 6)
+
+/* Enable Length/Type error checking for incoming frames. When this option is
+ * set, the MAC will filter frames that have a mismatched type/length field
+ * and if XAE_OPTION_REPORT_RXERR is set, the user is notified when these
+ * types of frames are encountered. When this option is cleared, the MAC will
+ * allow these types of frames to be received. Default: enabled (set) */
+#define XAE_OPTION_LENTYPE_ERR (1 << 7)
+
+/* Enable the transmitter. Default: enabled (set) */
+#define XAE_OPTION_TXEN (1 << 11)
+
+/* Enable the receiver. Default: enabled (set) */
+#define XAE_OPTION_RXEN (1 << 12)
+
+/* Default options set when device is initialized or reset */
+#define XAE_OPTION_DEFAULTS \
+ (XAE_OPTION_TXEN | \
+ XAE_OPTION_FLOW_CONTROL | \
+ XAE_OPTION_RXEN)
+
+/* Axi DMA Register definitions */
+
+#define XAXIDMA_TX_CR_OFFSET 0x00000000 /* Channel control */
+#define XAXIDMA_TX_SR_OFFSET 0x00000004 /* Status */
+#define XAXIDMA_TX_CDESC_OFFSET 0x00000008 /* Current descriptor pointer */
+#define XAXIDMA_TX_TDESC_OFFSET 0x00000010 /* Tail descriptor pointer */
+
+#define XAXIDMA_RX_CR_OFFSET 0x00000030 /* Channel control */
+#define XAXIDMA_RX_SR_OFFSET 0x00000034 /* Status */
+#define XAXIDMA_RX_CDESC_OFFSET 0x00000038 /* Current descriptor pointer */
+#define XAXIDMA_RX_TDESC_OFFSET 0x00000040 /* Tail descriptor pointer */
+
+#define XAXIDMA_CR_RUNSTOP_MASK 0x00000001 /* Start/stop DMA channel */
+#define XAXIDMA_CR_RESET_MASK 0x00000004 /* Reset DMA engine */
+
+#define XAXIDMA_BD_NDESC_OFFSET 0x00 /* Next descriptor pointer */
+#define XAXIDMA_BD_BUFA_OFFSET 0x08 /* Buffer address */
+#define XAXIDMA_BD_CTRL_LEN_OFFSET 0x18 /* Control/buffer length */
+#define XAXIDMA_BD_STS_OFFSET 0x1C /* Status */
+#define XAXIDMA_BD_USR0_OFFSET 0x20 /* User IP specific word0 */
+#define XAXIDMA_BD_USR1_OFFSET 0x24 /* User IP specific word1 */
+#define XAXIDMA_BD_USR2_OFFSET 0x28 /* User IP specific word2 */
+#define XAXIDMA_BD_USR3_OFFSET 0x2C /* User IP specific word3 */
+#define XAXIDMA_BD_USR4_OFFSET 0x30 /* User IP specific word4 */
+#define XAXIDMA_BD_ID_OFFSET 0x34 /* Sw ID */
+#define XAXIDMA_BD_HAS_STSCNTRL_OFFSET 0x38 /* Whether has stscntrl strm */
+#define XAXIDMA_BD_HAS_DRE_OFFSET 0x3C /* Whether has DRE */
+
+#define XAXIDMA_BD_HAS_DRE_SHIFT 8 /* Whether has DRE shift */
+#define XAXIDMA_BD_HAS_DRE_MASK 0xF00 /* Whether has DRE mask */
+#define XAXIDMA_BD_WORDLEN_MASK 0xFF /* Word length mask */
+
+#define XAXIDMA_BD_CTRL_LENGTH_MASK 0x007FFFFF /* Requested len */
+#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
+#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
+#define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits */
+
+#define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */
+#define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
+
+#define XAXIDMA_DELAY_SHIFT 24
+#define XAXIDMA_COALESCE_SHIFT 16
+
+#define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */
+#define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
+#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
+#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */
+
+/* Default TX/RX Threshold and waitbound values for SGDMA mode */
+#define XAXIDMA_DFT_TX_THRESHOLD 24
+#define XAXIDMA_DFT_TX_WAITBOUND 254
+#define XAXIDMA_DFT_RX_THRESHOLD 24
+#define XAXIDMA_DFT_RX_WAITBOUND 254
+
+#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
+#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
+#define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits */
+
+#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK 0x007FFFFF /* Actual len */
+#define XAXIDMA_BD_STS_COMPLETE_MASK 0x80000000 /* Completed */
+#define XAXIDMA_BD_STS_DEC_ERR_MASK 0x40000000 /* Decode error */
+#define XAXIDMA_BD_STS_SLV_ERR_MASK 0x20000000 /* Slave error */
+#define XAXIDMA_BD_STS_INT_ERR_MASK 0x10000000 /* Internal err */
+#define XAXIDMA_BD_STS_ALL_ERR_MASK 0x70000000 /* All errors */
+#define XAXIDMA_BD_STS_RXSOF_MASK 0x08000000 /* First rx pkt */
+#define XAXIDMA_BD_STS_RXEOF_MASK 0x04000000 /* Last rx pkt */
+#define XAXIDMA_BD_STS_ALL_MASK 0xFC000000 /* All status bits */
+
+#define XAXIDMA_BD_MINIMUM_ALIGNMENT 0x40
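The channel control (CR) register packs a frame-count coalesce threshold at bits 23:16 and a delay timeout at bits 31:24. A minimal sketch of programming them, assuming plain ioread32()/iowrite32() access to the DMA register window:

static void example_set_rx_coalesce(void __iomem *dma_regs, u32 count, u32 delay)
{
	u32 cr = ioread32(dma_regs + XAXIDMA_RX_CR_OFFSET);

	/* interrupt after 'count' frames or 'delay' ticks, whichever first */
	cr &= ~(XAXIDMA_COALESCE_MASK | XAXIDMA_DELAY_MASK);
	cr |= (count << XAXIDMA_COALESCE_SHIFT) & XAXIDMA_COALESCE_MASK;
	cr |= (delay << XAXIDMA_DELAY_SHIFT) & XAXIDMA_DELAY_MASK;
	iowrite32(cr, dma_regs + XAXIDMA_RX_CR_OFFSET);
}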
+
+/* Axi Ethernet registers definition */
+#define XAE_RAF_OFFSET 0x00000000 /* Reset and Address filter */
+#define XAE_TPF_OFFSET 0x00000004 /* Tx Pause Frame */
+#define XAE_IFGP_OFFSET 0x00000008 /* Tx Inter-frame gap adjustment*/
+#define XAE_IS_OFFSET 0x0000000C /* Interrupt status */
+#define XAE_IP_OFFSET 0x00000010 /* Interrupt pending */
+#define XAE_IE_OFFSET 0x00000014 /* Interrupt enable */
+#define XAE_TTAG_OFFSET 0x00000018 /* Tx VLAN TAG */
+#define XAE_RTAG_OFFSET 0x0000001C /* Rx VLAN TAG */
+#define XAE_UAWL_OFFSET 0x00000020 /* Unicast address word lower */
+#define XAE_UAWU_OFFSET 0x00000024 /* Unicast address word upper */
+#define XAE_TPID0_OFFSET 0x00000028 /* VLAN TPID0 register */
+#define XAE_TPID1_OFFSET 0x0000002C /* VLAN TPID1 register */
+#define XAE_PPST_OFFSET 0x00000030 /* PCS PMA Soft Temac Status Reg */
+#define XAE_RCW0_OFFSET 0x00000400 /* Rx Configuration Word 0 */
+#define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */
+#define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */
+#define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */
+#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */
+#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */
+#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */
+#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */
+#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
+#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
+#define XAE_MDIO_MIS_OFFSET 0x00000600 /* MII Management Interrupt Status */
+#define XAE_MDIO_MIP_OFFSET 0x00000620 /* MII Mgmt Interrupt Pending
+ * register offset */
+#define XAE_MDIO_MIE_OFFSET 0x00000640 /* MII Management Interrupt Enable
+ * register offset */
+#define XAE_MDIO_MIC_OFFSET 0x00000660 /* MII Management Interrupt Clear
+ * register offset. */
+#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
+#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
+#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
+#define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */
+#define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */
+
+#define XAE_TX_VLAN_DATA_OFFSET 0x00004000 /* TX VLAN data table address */
+#define XAE_RX_VLAN_DATA_OFFSET 0x00008000 /* RX VLAN data table address */
+#define XAE_MCAST_TABLE_OFFSET 0x00020000 /* Multicast table address */
+
+/* Bit Masks for Axi Ethernet RAF register */
+#define XAE_RAF_MCSTREJ_MASK 0x00000002 /* Reject receive multicast
+ * destination address */
+#define XAE_RAF_BCSTREJ_MASK 0x00000004 /* Reject receive broadcast
+ * destination address */
+#define XAE_RAF_TXVTAGMODE_MASK 0x00000018 /* Tx VLAN TAG mode */
+#define XAE_RAF_RXVTAGMODE_MASK 0x00000060 /* Rx VLAN TAG mode */
+#define XAE_RAF_TXVSTRPMODE_MASK 0x00000180 /* Tx VLAN STRIP mode */
+#define XAE_RAF_RXVSTRPMODE_MASK 0x00000600 /* Rx VLAN STRIP mode */
+#define XAE_RAF_NEWFNCENBL_MASK 0x00000800 /* New function mode */
+#define XAE_RAF_EMULTIFLTRENBL_MASK 0x00001000 /* Extended Multicast
+ * Filtering mode
+ */
+#define XAE_RAF_STATSRST_MASK 0x00002000 /* Stats. Counter Reset */
+#define XAE_RAF_RXBADFRMEN_MASK 0x00004000 /* Recv Bad Frame Enable */
+#define XAE_RAF_TXVTAGMODE_SHIFT 3 /* Tx Tag mode shift bits */
+#define XAE_RAF_RXVTAGMODE_SHIFT 5 /* Rx Tag mode shift bits */
+#define XAE_RAF_TXVSTRPMODE_SHIFT 7 /* Tx strip mode shift bits*/
+#define XAE_RAF_RXVSTRPMODE_SHIFT 9 /* Rx Strip mode shift bits*/
+
+/* Bit Masks for Axi Ethernet TPF and IFGP registers */
+#define XAE_TPF_TPFV_MASK 0x0000FFFF /* Tx pause frame value */
+#define XAE_IFGP0_IFGP_MASK 0x0000007F /* Transmit inter-frame
+ * gap adjustment value */
+
+/* Bit Masks for Axi Ethernet IS, IE and IP registers, Same masks apply
+ * for all 3 registers. */
+#define XAE_INT_HARDACSCMPLT_MASK 0x00000001 /* Hard register access
+ * complete */
+#define XAE_INT_AUTONEG_MASK 0x00000002 /* Auto negotiation
+ * complete */
+#define XAE_INT_RXCMPIT_MASK 0x00000004 /* Rx complete */
+#define XAE_INT_RXRJECT_MASK 0x00000008 /* Rx frame rejected */
+#define XAE_INT_RXFIFOOVR_MASK 0x00000010 /* Rx fifo overrun */
+#define XAE_INT_TXCMPIT_MASK 0x00000020 /* Tx complete */
+#define XAE_INT_RXDCMLOCK_MASK 0x00000040 /* Rx Dcm Lock */
+#define XAE_INT_MGTRDY_MASK 0x00000080 /* MGT clock Lock */
+#define XAE_INT_PHYRSTCMPLT_MASK 0x00000100 /* Phy Reset complete */
+#define XAE_INT_ALL_MASK 0x0000003F /* All the ints */
+
+#define XAE_INT_RECV_ERROR_MASK \
+ (XAE_INT_RXRJECT_MASK | XAE_INT_RXFIFOOVR_MASK) /* INT bits that
+ * indicate receive
+ * errors */
+
+/* Bit masks for Axi Ethernet VLAN TPID Word 0 register */
+#define XAE_TPID_0_MASK 0x0000FFFF /* TPID 0 */
+#define XAE_TPID_1_MASK 0xFFFF0000 /* TPID 1 */
+
+/* Bit masks for Axi Ethernet VLAN TPID Word 1 register */
+#define XAE_TPID_2_MASK 0x0000FFFF /* TPID 2 */
+#define XAE_TPID_3_MASK 0xFFFF0000 /* TPID 3 */
+
+/* Bit masks for Axi Ethernet RCW1 register */
+#define XAE_RCW1_RST_MASK 0x80000000 /* Reset */
+#define XAE_RCW1_JUM_MASK 0x40000000 /* Jumbo frame enable */
+#define XAE_RCW1_FCS_MASK 0x20000000 /* In-Band FCS enable
+ * (FCS not stripped) */
+#define XAE_RCW1_RX_MASK 0x10000000 /* Receiver enable */
+#define XAE_RCW1_VLAN_MASK 0x08000000 /* VLAN frame enable */
+#define XAE_RCW1_LT_DIS_MASK 0x02000000 /* Length/type field valid check
+ * disable */
+#define XAE_RCW1_CL_DIS_MASK 0x01000000 /* Control frame Length check
+ * disable */
+#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF /* Pause frame source address
+ * bits [47:32]. Bits [31:0] are
+ * stored in register RCW0 */
+
+/* Bit masks for Axi Ethernet TC register */
+#define XAE_TC_RST_MASK 0x80000000 /* Reset */
+#define XAE_TC_JUM_MASK 0x40000000 /* Jumbo frame enable */
+#define XAE_TC_FCS_MASK 0x20000000 /* In-Band FCS enable
+ * (FCS not generated) */
+#define XAE_TC_TX_MASK 0x10000000 /* Transmitter enable */
+#define XAE_TC_VLAN_MASK 0x08000000 /* VLAN frame enable */
+#define XAE_TC_IFG_MASK 0x02000000 /* Inter-frame gap adjustment
+ * enable */
+
+/* Bit masks for Axi Ethernet FCC register */
+#define XAE_FCC_FCRX_MASK 0x20000000 /* Rx flow control enable */
+#define XAE_FCC_FCTX_MASK 0x40000000 /* Tx flow control enable */
+
+/* Bit masks for Axi Ethernet EMMC register */
+#define XAE_EMMC_LINKSPEED_MASK 0xC0000000 /* Link speed */
+#define XAE_EMMC_RGMII_MASK 0x20000000 /* RGMII mode enable */
+#define XAE_EMMC_SGMII_MASK 0x10000000 /* SGMII mode enable */
+#define XAE_EMMC_GPCS_MASK 0x08000000 /* 1000BaseX mode enable */
+#define XAE_EMMC_HOST_MASK 0x04000000 /* Host interface enable */
+#define XAE_EMMC_TX16BIT 0x02000000 /* 16 bit Tx client enable */
+#define XAE_EMMC_RX16BIT 0x01000000 /* 16 bit Rx client enable */
+#define XAE_EMMC_LINKSPD_10 0x00000000 /* Link Speed mask for 10 Mbit */
+#define XAE_EMMC_LINKSPD_100 0x40000000 /* Link Speed mask for 100 Mbit */
+#define XAE_EMMC_LINKSPD_1000 0x80000000 /* Link Speed mask for 1000 Mbit */
+
+/* Bit masks for Axi Ethernet PHYC register */
+#define XAE_PHYC_SGMIILINKSPEED_MASK 0xC0000000 /* SGMII link speed mask*/
+#define XAE_PHYC_RGMIILINKSPEED_MASK 0x0000000C /* RGMII link speed */
+#define XAE_PHYC_RGMIIHD_MASK 0x00000002 /* RGMII Half-duplex */
+#define XAE_PHYC_RGMIILINK_MASK 0x00000001 /* RGMII link status */
+#define XAE_PHYC_RGLINKSPD_10 0x00000000 /* RGMII link 10 Mbit */
+#define XAE_PHYC_RGLINKSPD_100 0x00000004 /* RGMII link 100 Mbit */
+#define XAE_PHYC_RGLINKSPD_1000 0x00000008 /* RGMII link 1000 Mbit */
+#define XAE_PHYC_SGLINKSPD_10 0x00000000 /* SGMII link 10 Mbit */
+#define XAE_PHYC_SGLINKSPD_100 0x40000000 /* SGMII link 100 Mbit */
+#define XAE_PHYC_SGLINKSPD_1000 0x80000000 /* SGMII link 1000 Mbit */
+
+/* Bit masks for Axi Ethernet MDIO interface MC register */
+#define XAE_MDIO_MC_MDIOEN_MASK 0x00000040 /* MII management enable */
+#define XAE_MDIO_MC_CLOCK_DIVIDE_MAX 0x3F /* Maximum MDIO divisor */
+
+/* Bit masks for Axi Ethernet MDIO interface MCR register */
+#define XAE_MDIO_MCR_PHYAD_MASK 0x1F000000 /* Phy Address Mask */
+#define XAE_MDIO_MCR_PHYAD_SHIFT 24 /* Phy Address Shift */
+#define XAE_MDIO_MCR_REGAD_MASK 0x001F0000 /* Reg Address Mask */
+#define XAE_MDIO_MCR_REGAD_SHIFT 16 /* Reg Address Shift */
+#define XAE_MDIO_MCR_OP_MASK 0x0000C000 /* Operation Code Mask */
+#define XAE_MDIO_MCR_OP_SHIFT 13 /* Operation Code Shift */
+#define XAE_MDIO_MCR_OP_READ_MASK 0x00008000 /* Op Code Read Mask */
+#define XAE_MDIO_MCR_OP_WRITE_MASK 0x00004000 /* Op Code Write Mask */
+#define XAE_MDIO_MCR_INITIATE_MASK 0x00000800 /* Initiate Mask */
+#define XAE_MDIO_MCR_READY_MASK 0x00000080 /* Ready Mask */
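The MCR fields compose into a single command word; a sketch of building an MDIO read request from the masks above (the actual state machine belongs to xilinx_axienet_mdio.c, added by the same series per the Makefile hunk earlier):

static u32 example_mdio_read_cmd(u32 phy_id, u32 reg_num)
{
	/* place the PHY and register addresses, then request a read */
	return ((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) & XAE_MDIO_MCR_PHYAD_MASK) |
	       ((reg_num << XAE_MDIO_MCR_REGAD_SHIFT) & XAE_MDIO_MCR_REGAD_MASK) |
	       XAE_MDIO_MCR_INITIATE_MASK |
	       XAE_MDIO_MCR_OP_READ_MASK;
}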
+
+/* Bit masks for Axi Ethernet MDIO interface MIS, MIP, MIE, MIC registers */
+#define XAE_MDIO_INT_MIIM_RDY_MASK 0x00000001 /* MIIM Interrupt */
+
+/* Bit masks for Axi Ethernet UAW1 register */
+#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF /* Station address bits
+ * [47:32]; Station address
+ * bits [31:0] are stored in
+ * register UAW0 */
+
+/* Bit masks for Axi Ethernet FMI register */
+#define XAE_FMI_PM_MASK 0x80000000 /* Promis. mode enable */
+#define XAE_FMI_IND_MASK 0x00000003 /* Index Mask */
+
+#define XAE_MDIO_DIV_DFT 29 /* Default MDIO clock divisor */
+
+/* Defines for different options for C_PHY_TYPE parameter in Axi Ethernet IP */
+#define XAE_PHY_TYPE_MII 0
+#define XAE_PHY_TYPE_GMII 1
+#define XAE_PHY_TYPE_RGMII_1_3 2
+#define XAE_PHY_TYPE_RGMII_2_0 3
+#define XAE_PHY_TYPE_SGMII 4
+#define XAE_PHY_TYPE_1000BASE_X 5
+
+#define XAE_MULTICAST_CAM_TABLE_NUM 4 /* Total number of entries in the
+ * hardware multicast table. */
+
+/* Axi Ethernet Synthesis features */
+#define XAE_FEATURE_PARTIAL_RX_CSUM (1 << 0)
+#define XAE_FEATURE_PARTIAL_TX_CSUM (1 << 1)
+#define XAE_FEATURE_FULL_RX_CSUM (1 << 2)
+#define XAE_FEATURE_FULL_TX_CSUM (1 << 3)
+
+#define XAE_NO_CSUM_OFFLOAD 0
+
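+/* The full Rx checksum status is reported as a 3-bit field in BD word app2
+ * (see axienet_recv()); the values below mark a validated TCP or UDP
+ * checksum over IP.
+ */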
+#define XAE_FULL_CSUM_STATUS_MASK 0x00000038
+#define XAE_IP_UDP_CSUM_VALIDATED 0x00000003
+#define XAE_IP_TCP_CSUM_VALIDATED 0x00000002
+
+#define DELAY_OF_ONE_MILLISEC 1000
+
+/**
+ * struct axidma_bd - Axi Dma buffer descriptor layout
+ * @next: MM2S/S2MM Next Descriptor Pointer
+ * @reserved1: Reserved and not used
+ * @phys: MM2S/S2MM Buffer Address
+ * @reserved2: Reserved and not used
+ * @reserved3: Reserved and not used
+ * @reserved4: Reserved and not used
+ * @cntrl: MM2S/S2MM Control value
+ * @status: MM2S/S2MM Status value
+ * @app0: MM2S/S2MM User Application Field 0.
+ * @app1: MM2S/S2MM User Application Field 1.
+ * @app2: MM2S/S2MM User Application Field 2.
+ * @app3: MM2S/S2MM User Application Field 3.
+ * @app4: MM2S/S2MM User Application Field 4.
+ * @sw_id_offset: MM2S/S2MM Sw ID
+ * @reserved5: Reserved and not used
+ * @reserved6: Reserved and not used
+ */
+struct axidma_bd {
+ u32 next; /* Physical address of next buffer descriptor */
+ u32 reserved1;
+ u32 phys;
+ u32 reserved2;
+ u32 reserved3;
+ u32 reserved4;
+ u32 cntrl;
+ u32 status;
+ u32 app0;
+ u32 app1; /* TX start << 16 | insert */
+ u32 app2; /* TX csum seed */
+ u32 app3;
+ u32 app4;
+ u32 sw_id_offset;
+ u32 reserved5;
+ u32 reserved6;
+};
+
+/**
+ * struct axienet_local - axienet private per device data
+ * @ndev: Pointer for net_device to which it will be attached.
+ * @dev: Pointer to device structure
+ * @phy_dev: Pointer to PHY device structure attached to the axienet_local
+ * @phy_node: Pointer to device node structure
+ * @mii_bus: Pointer to MII bus structure
+ * @mdio_irqs: IRQs table for MDIO bus required in mii_bus structure
+ * @regs: Base address for the axienet_local device address space
+ * @dma_regs: Base address for the axidma device address space
+ * @dma_err_tasklet: Tasklet structure to process Axi DMA errors
+ * @tx_irq: Axidma TX IRQ number
+ * @rx_irq: Axidma RX IRQ number
+ * @temac_type: axienet type to identify between soft and hard temac
+ * @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
+ * @options: AxiEthernet option word
+ * @last_link: Phy link state in which the PHY was negotiated earlier
+ * @features: Stores the extended features supported by the axienet hw
+ * @tx_bd_v: Virtual address of the TX buffer descriptor ring
+ * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
+ * @rx_bd_v: Virtual address of the RX buffer descriptor ring
+ * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
+ * @tx_bd_ci:	Stores the index of the Tx buffer descriptor in the ring being
+ *		accessed currently. Used while processing BDs after a TX has
+ *		completed.
+ * @tx_bd_tail:	Stores the index of the Tx buffer descriptor in the ring being
+ *		accessed currently. Used while allocating BDs before a TX
+ *		starts.
+ * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
+ * accessed currently.
+ * @max_frm_size: Stores the maximum size of the frame that can be
+ *		  Txed/Rxed in the existing hardware. If the jumbo option is
+ *		  supported, the maximum frame size is 9k; else it is
+ *		  1522 bytes (assuming support for basic VLAN).
+ * @jumbo_support: Stores hardware configuration for jumbo support. If hardware
+ * can handle jumbo packets, this entry will be 1, else 0.
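+ * @csum_offload_on_tx_path: Stores the checksum selection on the TX side.
+ * @csum_offload_on_rx_path: Stores the checksum selection on the RX side.
+ * @coalesce_count_rx:	Stores the irq coalesce count for the RX side.
+ * @coalesce_count_tx:	Stores the irq coalesce count for the TX side.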
+ */
+struct axienet_local {
+ struct net_device *ndev;
+ struct device *dev;
+
+ /* Connection to PHY device */
+ struct phy_device *phy_dev; /* Pointer to PHY device */
+ struct device_node *phy_node;
+
+ /* MDIO bus data */
+ struct mii_bus *mii_bus; /* MII bus reference */
+ int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */
+
+ /* IO registers, dma functions and IRQs */
+ void __iomem *regs;
+ void __iomem *dma_regs;
+
+ struct tasklet_struct dma_err_tasklet;
+
+ int tx_irq;
+ int rx_irq;
+ u32 temac_type;
+ u32 phy_type;
+
+ u32 options; /* Current options word */
+ u32 last_link;
+ u32 features;
+
+ /* Buffer descriptors */
+ struct axidma_bd *tx_bd_v;
+ dma_addr_t tx_bd_p;
+ struct axidma_bd *rx_bd_v;
+ dma_addr_t rx_bd_p;
+ u32 tx_bd_ci;
+ u32 tx_bd_tail;
+ u32 rx_bd_ci;
+
+ u32 max_frm_size;
+ u32 jumbo_support;
+
+ int csum_offload_on_tx_path;
+ int csum_offload_on_rx_path;
+
+ u32 coalesce_count_rx;
+ u32 coalesce_count_tx;
+};
+
+/**
+ * struct axienet_option - Used to set Axi Ethernet hardware options
+ * @opt: Option to be set.
+ * @reg: Register offset to be written for setting the option
+ * @m_or: Mask to be ORed for setting the option in the register
+ */
+struct axienet_option {
+ u32 opt;
+ u32 reg;
+ u32 m_or;
+};
+
+/**
+ * axienet_ior - Memory mapped Axi Ethernet register read
+ * @lp: Pointer to axienet local structure
+ * @offset: Address offset from the base address of Axi Ethernet core
+ *
+ * returns: The contents of the Axi Ethernet register
+ *
+ * This function returns the contents of the corresponding register.
+ */
+static inline u32 axienet_ior(struct axienet_local *lp, off_t offset)
+{
+ return in_be32(lp->regs + offset);
+}
+
+/**
+ * axienet_iow - Memory mapped Axi Ethernet register write
+ * @lp: Pointer to axienet local structure
+ * @offset: Address offset from the base address of Axi Ethernet core
+ * @value: Value to be written into the Axi Ethernet register
+ *
+ * This function writes the desired value into the corresponding Axi Ethernet
+ * register.
+ */
+static inline void axienet_iow(struct axienet_local *lp, off_t offset,
+ u32 value)
+{
+ out_be32((lp->regs + offset), value);
+}
+
+/* Function prototypes visible in xilinx_axienet_mdio.c for other files */
+int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np);
+int axienet_mdio_wait_until_ready(struct axienet_local *lp);
+void axienet_mdio_teardown(struct axienet_local *lp);
+
+#endif /* XILINX_AXI_ENET_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
new file mode 100644
index 000000000000..2fcbeba6814b
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -0,0 +1,1669 @@
+/*
+ * Xilinx Axi Ethernet device driver
+ *
+ * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
+ * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
+ * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
+ * Copyright (c) 2010 Xilinx, Inc. All rights reserved.
+ * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
+ * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
+ *
+ * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex-6
+ * and Spartan-6 FPGA families.
+ *
+ * TODO:
+ * - Add Axi Fifo support.
+ * - Factor out Axi DMA code into separate driver.
+ * - Test and fix basic multicast filtering.
+ * - Add support for extended multicast filtering.
+ * - Test basic VLAN support.
+ * - Add extended VLAN support.
+ */
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/phy.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+
+#include "xilinx_axienet.h"
+
+/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
+#define TX_BD_NUM 64
+#define RX_BD_NUM 128
+
+/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
+#define DRIVER_NAME "xaxienet"
+#define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
+#define DRIVER_VERSION "1.00a"
+
+#define AXIENET_REGS_N 32
+
+/* Match table for of_platform binding */
+static struct of_device_id axienet_of_match[] __devinitdata = {
+ { .compatible = "xlnx,axi-ethernet-1.00.a", },
+ { .compatible = "xlnx,axi-ethernet-1.01.a", },
+ { .compatible = "xlnx,axi-ethernet-2.01.a", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, axienet_of_match);
+
+/* Option table for setting up Axi Ethernet hardware options */
+static struct axienet_option axienet_options[] = {
+ /* Turn on jumbo packet support for both Rx and Tx */
+ {
+ .opt = XAE_OPTION_JUMBO,
+ .reg = XAE_TC_OFFSET,
+ .m_or = XAE_TC_JUM_MASK,
+ }, {
+ .opt = XAE_OPTION_JUMBO,
+ .reg = XAE_RCW1_OFFSET,
+ .m_or = XAE_RCW1_JUM_MASK,
+ }, { /* Turn on VLAN packet support for both Rx and Tx */
+ .opt = XAE_OPTION_VLAN,
+ .reg = XAE_TC_OFFSET,
+ .m_or = XAE_TC_VLAN_MASK,
+ }, {
+ .opt = XAE_OPTION_VLAN,
+ .reg = XAE_RCW1_OFFSET,
+ .m_or = XAE_RCW1_VLAN_MASK,
+ }, { /* Turn on FCS stripping on receive packets */
+ .opt = XAE_OPTION_FCS_STRIP,
+ .reg = XAE_RCW1_OFFSET,
+ .m_or = XAE_RCW1_FCS_MASK,
+ }, { /* Turn on FCS insertion on transmit packets */
+ .opt = XAE_OPTION_FCS_INSERT,
+ .reg = XAE_TC_OFFSET,
+ .m_or = XAE_TC_FCS_MASK,
+ }, { /* Turn off length/type field checking on receive packets */
+ .opt = XAE_OPTION_LENTYPE_ERR,
+ .reg = XAE_RCW1_OFFSET,
+ .m_or = XAE_RCW1_LT_DIS_MASK,
+ }, { /* Turn on Rx flow control */
+ .opt = XAE_OPTION_FLOW_CONTROL,
+ .reg = XAE_FCC_OFFSET,
+ .m_or = XAE_FCC_FCRX_MASK,
+ }, { /* Turn on Tx flow control */
+ .opt = XAE_OPTION_FLOW_CONTROL,
+ .reg = XAE_FCC_OFFSET,
+ .m_or = XAE_FCC_FCTX_MASK,
+ }, { /* Turn on promiscuous frame filtering */
+ .opt = XAE_OPTION_PROMISC,
+ .reg = XAE_FMI_OFFSET,
+ .m_or = XAE_FMI_PM_MASK,
+ }, { /* Enable transmitter */
+ .opt = XAE_OPTION_TXEN,
+ .reg = XAE_TC_OFFSET,
+ .m_or = XAE_TC_TX_MASK,
+ }, { /* Enable receiver */
+ .opt = XAE_OPTION_RXEN,
+ .reg = XAE_RCW1_OFFSET,
+ .m_or = XAE_RCW1_RX_MASK,
+ },
+ {}
+};
+
+/**
+ * axienet_dma_in32 - Memory mapped Axi DMA register read
+ * @lp: Pointer to axienet local structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ *
+ * returns: The contents of the Axi DMA register
+ *
+ * This function returns the contents of the corresponding Axi DMA register.
+ */
+static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
+{
+ return in_be32(lp->dma_regs + reg);
+}
+
+/**
+ * axienet_dma_out32 - Memory mapped Axi DMA register write.
+ * @lp: Pointer to axienet local structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ * @value: Value to be written into the Axi DMA register
+ *
+ * This function writes the desired value into the corresponding Axi DMA
+ * register.
+ */
+static inline void axienet_dma_out32(struct axienet_local *lp,
+ off_t reg, u32 value)
+{
+ out_be32((lp->dma_regs + reg), value);
+}
+
+/**
+ * axienet_dma_bd_release - Release buffer descriptor rings
+ * @ndev: Pointer to the net_device structure
+ *
+ * This function is used to release the descriptors allocated in
+ * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi Ethernet
+ * driver stop API is called.
+ */
+static void axienet_dma_bd_release(struct net_device *ndev)
+{
+ int i;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ dev_kfree_skb((struct sk_buff *)
+ (lp->rx_bd_v[i].sw_id_offset));
+ }
+
+ if (lp->rx_bd_v) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ lp->rx_bd_v,
+ lp->rx_bd_p);
+ }
+ if (lp->tx_bd_v) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ lp->tx_bd_v,
+ lp->tx_bd_p);
+ }
+}
+
+/**
+ * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
+ * @ndev: Pointer to the net_device structure
+ *
+ * returns: 0, on success
+ * -ENOMEM, on failure
+ *
+ * This function is called to initialize the Rx and Tx DMA descriptor
+ * rings. It initializes the descriptors with the required default values
+ * and is invoked when the Axi Ethernet driver reset routine runs.
+ */
+static int axienet_dma_bd_init(struct net_device *ndev)
+{
+ u32 cr;
+ int i;
+ struct sk_buff *skb;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ /* Reset the indexes which are used for accessing the BDs */
+ lp->tx_bd_ci = 0;
+ lp->tx_bd_tail = 0;
+ lp->rx_bd_ci = 0;
+
+ /*
+ * Allocate the Tx and Rx buffer descriptors.
+ */
+ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p,
+ GFP_KERNEL);
+ if (!lp->tx_bd_v) {
+ dev_err(&ndev->dev, "unable to allocate DMA Tx buffer "
+ "descriptors");
+ goto out;
+ }
+
+ lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p,
+ GFP_KERNEL);
+ if (!lp->rx_bd_v) {
+ dev_err(&ndev->dev, "unable to allocate DMA Rx buffer "
+ "descriptors");
+ goto out;
+ }
+
+ memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
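+	/* Link each Tx BD's 'next' field to the physical address of the
+	 * following descriptor, wrapping around to form a circular ring.
+	 */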
+ for (i = 0; i < TX_BD_NUM; i++) {
+ lp->tx_bd_v[i].next = lp->tx_bd_p +
+ sizeof(*lp->tx_bd_v) *
+ ((i + 1) % TX_BD_NUM);
+ }
+
+ memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
+ for (i = 0; i < RX_BD_NUM; i++) {
+ lp->rx_bd_v[i].next = lp->rx_bd_p +
+ sizeof(*lp->rx_bd_v) *
+ ((i + 1) % RX_BD_NUM);
+
+ skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
+ if (!skb) {
+ dev_err(&ndev->dev, "alloc_skb error %d\n", i);
+ goto out;
+ }
+
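+		/* Remember the skb in the BD so axienet_recv() can hand it
+		 * to the stack once the DMA engine has filled it.
+		 */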
+ lp->rx_bd_v[i].sw_id_offset = (u32) skb;
+ lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
+ skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ lp->rx_bd_v[i].cntrl = lp->max_frm_size;
+ }
+
+ /* Start updating the Rx channel control register */
+ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
+ ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XAXIDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Write to the Rx channel control register */
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+
+ /* Start updating the Tx channel control register */
+ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
+ ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Write to the Tx channel control register */
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.*/
+ axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting */
+ axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+
+ return 0;
+out:
+ axienet_dma_bd_release(ndev);
+ return -ENOMEM;
+}
+
+/**
+ * axienet_set_mac_address - Write the MAC address
+ * @ndev: Pointer to the net_device structure
+ * @address: 6 byte Address to be written as MAC address
+ *
+ * This function is called to initialize the MAC address of the Axi Ethernet
+ * core. It writes to the UAW0 and UAW1 registers of the core.
+ */
+static void axienet_set_mac_address(struct net_device *ndev, void *address)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ if (address)
+ memcpy(ndev->dev_addr, address, ETH_ALEN);
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ random_ether_addr(ndev->dev_addr);
+
+	/* Set up the unicast MAC address filter with the given MAC address */
+ axienet_iow(lp, XAE_UAW0_OFFSET,
+ (ndev->dev_addr[0]) |
+ (ndev->dev_addr[1] << 8) |
+ (ndev->dev_addr[2] << 16) |
+ (ndev->dev_addr[3] << 24));
+ axienet_iow(lp, XAE_UAW1_OFFSET,
+ (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
+ ~XAE_UAW1_UNICASTADDR_MASK) |
+ (ndev->dev_addr[4] |
+ (ndev->dev_addr[5] << 8))));
+}
+
+/**
+ * netdev_set_mac_address - Write the MAC address (from outside the driver)
+ * @ndev: Pointer to the net_device structure
+ * @p: 6 byte Address to be written as MAC address
+ *
+ * returns: 0 for all conditions. Presently, there is no failure case.
+ *
+ * This function is called to initialize the MAC address of the Axi Ethernet
+ * core. It calls the core specific axienet_set_mac_address. This is the
+ * function that goes into net_device_ops structure entry ndo_set_mac_address.
+ */
+static int netdev_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct sockaddr *addr = p;
+ axienet_set_mac_address(ndev, addr->sa_data);
+ return 0;
+}
+
+/**
+ * axienet_set_multicast_list - Prepare the multicast table
+ * @ndev: Pointer to the net_device structure
+ *
+ * This function is called to initialize the multicast table during
+ * initialization. The Axi Ethernet basic multicast support has a four-entry
+ * multicast table which is initialized here. Additionally this function
+ * goes into the net_device_ops structure entry ndo_set_rx_mode. This
+ * means whenever the multicast table entries need to be updated this
+ * function gets called.
+ */
+static void axienet_set_multicast_list(struct net_device *ndev)
+{
+ int i;
+ u32 reg, af0reg, af1reg;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
+ netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
+ /* We must make the kernel realize we had to move into
+ * promiscuous mode. If it was a promiscuous mode request
+ * the flag is already set. If not we set it. */
+ ndev->flags |= IFF_PROMISC;
+ reg = axienet_ior(lp, XAE_FMI_OFFSET);
+ reg |= XAE_FMI_PM_MASK;
+ axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
+ } else if (!netdev_mc_empty(ndev)) {
+ struct netdev_hw_addr *ha;
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
+ break;
+
+ af0reg = (ha->addr[0]);
+ af0reg |= (ha->addr[1] << 8);
+ af0reg |= (ha->addr[2] << 16);
+ af0reg |= (ha->addr[3] << 24);
+
+ af1reg = (ha->addr[4]);
+ af1reg |= (ha->addr[5] << 8);
+
+ reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+ reg |= i;
+
+ axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
+ axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
+ i++;
+ }
+ } else {
+ reg = axienet_ior(lp, XAE_FMI_OFFSET);
+ reg &= ~XAE_FMI_PM_MASK;
+
+ axienet_iow(lp, XAE_FMI_OFFSET, reg);
+
+ for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
+ reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+ reg |= i;
+
+ axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ axienet_iow(lp, XAE_AF0_OFFSET, 0);
+ axienet_iow(lp, XAE_AF1_OFFSET, 0);
+ }
+
+ dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
+ }
+}
+
+/**
+ * axienet_setoptions - Set an Axi Ethernet option
+ * @ndev: Pointer to the net_device structure
+ * @options: Option to be enabled/disabled
+ *
+ * The Axi Ethernet core has multiple features which can be selectively turned
+ * on or off. The typical options could be jumbo frame option, basic VLAN
+ * option, promiscuous mode option etc. This function is used to set or clear
+ * these options in the Axi Ethernet hardware. This is done through the
+ * axienet_option structure.
+ */
+static void axienet_setoptions(struct net_device *ndev, u32 options)
+{
+ int reg;
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_option *tp = &axienet_options[0];
+
+ while (tp->opt) {
+ reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
+ if (options & tp->opt)
+ reg |= tp->m_or;
+ axienet_iow(lp, tp->reg, reg);
+ tp++;
+ }
+
+ lp->options |= options;
+}
+
+static void __axienet_device_reset(struct axienet_local *lp,
+ struct device *dev, off_t offset)
+{
+ u32 timeout;
+ /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
+ * process of Axi DMA takes a while to complete as all pending
+ * commands/transfers will be flushed or completed during this
+ * reset process. */
+ axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
+ timeout = DELAY_OF_ONE_MILLISEC;
+ while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
+ udelay(1);
+ if (--timeout == 0) {
+ dev_err(dev, "axienet_device_reset DMA "
+ "reset timeout!\n");
+ break;
+ }
+ }
+}
+
+/**
+ * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
+ * @ndev: Pointer to the net_device structure
+ *
+ * This function is called to reset and initialize the Axi Ethernet core. This
+ * is typically called during initialization. It does a reset of the Axi DMA
+ * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
+ * areconnected to Axi Ethernet reset lines, this in turn resets the Axi
+ * Ethernet core. No separate hardware reset is done for the Axi Ethernet
+ * core.
+ */
+static void axienet_device_reset(struct net_device *ndev)
+{
+ u32 axienet_status;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
+ __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
+
+ lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
+ lp->options &= (~XAE_OPTION_JUMBO);
+
+ if ((ndev->mtu > XAE_MTU) &&
+ (ndev->mtu <= XAE_JUMBO_MTU) &&
+ (lp->jumbo_support)) {
+ lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
+ XAE_TRL_SIZE;
+ lp->options |= XAE_OPTION_JUMBO;
+ }
+
+ if (axienet_dma_bd_init(ndev)) {
+ dev_err(&ndev->dev, "axienet_device_reset descriptor "
+ "allocation failed\n");
+ }
+
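+	/* Disable the receiver and acknowledge any pending rx-reject
+	 * interrupt before the core is reconfigured below.
+	 */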
+ axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
+ axienet_status &= ~XAE_RCW1_RX_MASK;
+ axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
+
+ axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
+ if (axienet_status & XAE_INT_RXRJECT_MASK)
+ axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+
+ axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+
+ /* Sync default options with HW but leave receiver and
+ * transmitter disabled.*/
+ axienet_setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ axienet_set_mac_address(ndev, NULL);
+ axienet_set_multicast_list(ndev);
+ axienet_setoptions(ndev, lp->options);
+
+ ndev->trans_start = jiffies;
+}
+
+/**
+ * axienet_adjust_link - Adjust the PHY link speed/duplex.
+ * @ndev: Pointer to the net_device structure
+ *
+ * This function is called to change the speed and duplex setting after
+ * auto negotiation is done by the PHY. This is the function that gets
+ * registered with the PHY interface through the "of_phy_connect" call.
+ */
+static void axienet_adjust_link(struct net_device *ndev)
+{
+ u32 emmc_reg;
+ u32 link_state;
+ u32 setspeed = 1;
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct phy_device *phy = lp->phy_dev;
+
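+	/* Fold speed, duplex and link into one word so the MAC is only
+	 * reprogrammed when the negotiated state has actually changed.
+	 */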
+ link_state = phy->speed | (phy->duplex << 1) | phy->link;
+ if (lp->last_link != link_state) {
+ if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
+ if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
+ setspeed = 0;
+ } else {
+ if ((phy->speed == SPEED_1000) &&
+ (lp->phy_type == XAE_PHY_TYPE_MII))
+ setspeed = 0;
+ }
+
+ if (setspeed == 1) {
+ emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
+ emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
+
+ switch (phy->speed) {
+ case SPEED_1000:
+ emmc_reg |= XAE_EMMC_LINKSPD_1000;
+ break;
+ case SPEED_100:
+ emmc_reg |= XAE_EMMC_LINKSPD_100;
+ break;
+ case SPEED_10:
+ emmc_reg |= XAE_EMMC_LINKSPD_10;
+ break;
+ default:
+				dev_err(&ndev->dev, "Speed other than 10, 100 "
+					"or 1000 Mbps is not supported\n");
+ break;
+ }
+
+ axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
+ lp->last_link = link_state;
+ phy_print_status(phy);
+ } else {
+ dev_err(&ndev->dev, "Error setting Axi Ethernet "
+ "mac speed\n");
+ }
+ }
+}
+
+/**
+ * axienet_start_xmit_done - Invoked once a transmit is completed by the
+ * Axi DMA Tx channel.
+ * @ndev: Pointer to the net_device structure
+ *
+ * This function is invoked from the Axi DMA Tx isr to notify the completion
+ * of transmit operation. It clears fields in the corresponding Tx BDs and
+ * unmaps the corresponding buffer so that CPU can regain ownership of the
+ * buffer. It finally invokes "netif_wake_queue" to restart transmission if
+ * required.
+ */
+static void axienet_start_xmit_done(struct net_device *ndev)
+{
+ u32 size = 0;
+ u32 packets = 0;
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axidma_bd *cur_p;
+ unsigned int status = 0;
+
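+	/* Walk the completed Tx BDs starting at tx_bd_ci, unmapping buffers,
+	 * freeing skbs and accumulating byte/packet counts.
+	 */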
+ cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+ status = cur_p->status;
+ while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+ if (cur_p->app4)
+ dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
+ /*cur_p->phys = 0;*/
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app4 = 0;
+ cur_p->status = 0;
+
+ size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
+ packets++;
+
+		lp->tx_bd_ci = (lp->tx_bd_ci + 1) % TX_BD_NUM;
+ cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+ status = cur_p->status;
+ }
+
+ ndev->stats.tx_packets += packets;
+ ndev->stats.tx_bytes += size;
+ netif_wake_queue(ndev);
+}
+
+/**
+ * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
+ * @lp: Pointer to the axienet_local structure
+ * @num_frag: The number of BDs to check for
+ *
+ * returns: 0, on success
+ * NETDEV_TX_BUSY, if any of the descriptors are not free
+ *
+ * This function is invoked before BDs are allocated and transmission starts.
+ * This function returns 0 if a BD or group of BDs can be allocated for
+ * transmission. If the BD or any of the BDs are not free the function
+ * returns a busy status. This is invoked from axienet_start_xmit.
+ */
+static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
+ int num_frag)
+{
+ struct axidma_bd *cur_p;
+ cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
+ if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
+ return NETDEV_TX_BUSY;
+ return 0;
+}
+
+/**
+ * axienet_start_xmit - Starts the transmission.
+ * @skb: sk_buff pointer that contains data to be Txed.
+ * @ndev: Pointer to net_device structure.
+ *
+ * returns: NETDEV_TX_OK, on success
+ * NETDEV_TX_BUSY, if any of the descriptors are not free
+ *
+ * This function is invoked from upper layers to initiate transmission. The
+ * function uses the next available free BDs and populates their fields to
+ * start the transmission. Additionally if checksum offloading is supported,
+ * it populates AXI Stream Control fields with appropriate values.
+ */
+static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ u32 ii;
+ u32 num_frag;
+ u32 csum_start_off;
+ u32 csum_index_off;
+ skb_frag_t *frag;
+ dma_addr_t tail_p;
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axidma_bd *cur_p;
+
+ num_frag = skb_shinfo(skb)->nr_frags;
+ cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+
+ if (axienet_check_tx_bd_space(lp, num_frag)) {
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
+ /* Tx Full Checksum Offload Enabled */
+ cur_p->app0 |= 2;
+		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
+ csum_start_off = skb_transport_offset(skb);
+ csum_index_off = csum_start_off + skb->csum_offset;
+ /* Tx Partial Checksum Offload Enabled */
+ cur_p->app0 |= 1;
+ cur_p->app1 = (csum_start_off << 16) | csum_index_off;
+ }
+ } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
+ }
+
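+	/* First BD: map the linear part of the skb and mark start-of-frame */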
+ cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
+ cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ for (ii = 0; ii < num_frag; ii++) {
+		lp->tx_bd_tail = (lp->tx_bd_tail + 1) % TX_BD_NUM;
+ cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ frag = &skb_shinfo(skb)->frags[ii];
+ cur_p->phys = dma_map_single(ndev->dev.parent,
+ skb_frag_address(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ cur_p->cntrl = skb_frag_size(frag);
+ }
+
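+	/* Last BD: mark end-of-frame and stash the skb pointer so it can be
+	 * freed from axienet_start_xmit_done().
+	 */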
+ cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
+ cur_p->app4 = (unsigned long)skb;
+
+ tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+ /* Start the transfer */
+ axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+	lp->tx_bd_tail = (lp->tx_bd_tail + 1) % TX_BD_NUM;
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
+ * BD processing.
+ * @ndev: Pointer to net_device structure.
+ *
+ * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
+ * does minimal processing and invokes "netif_rx" to complete further
+ * processing.
+ */
+static void axienet_recv(struct net_device *ndev)
+{
+ u32 length;
+ u32 csumstatus;
+ u32 size = 0;
+ u32 packets = 0;
+ dma_addr_t tail_p;
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct sk_buff *skb, *new_skb;
+ struct axidma_bd *cur_p;
+
+ tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+ cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+
+ while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ skb = (struct sk_buff *) (cur_p->sw_id_offset);
+ length = cur_p->app4 & 0x0000FFFF;
+
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, ndev);
+ /*skb_checksum_none_assert(skb);*/
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* if we're doing Rx csum offload, set it up */
+ if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
+ csumstatus = (cur_p->app2 &
+ XAE_FULL_CSUM_STATUS_MASK) >> 3;
+ if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
+ (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
+ skb->protocol == __constant_htons(ETH_P_IP) &&
+ skb->len > 64) {
+ skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+
+ netif_rx(skb);
+
+ size += length;
+ packets++;
+
+ new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
+ if (!new_skb) {
+ dev_err(&ndev->dev, "no memory for new sk_buff\n");
+ return;
+ }
+ cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ cur_p->cntrl = lp->max_frm_size;
+ cur_p->status = 0;
+ cur_p->sw_id_offset = (u32) new_skb;
+
+		lp->rx_bd_ci = (lp->rx_bd_ci + 1) % RX_BD_NUM;
+ cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+ }
+
+ ndev->stats.rx_packets += packets;
+ ndev->stats.rx_bytes += size;
+
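+	/* Hand the recycled BDs back to hardware by updating the Rx tail
+	 * descriptor pointer.
+	 */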
+ axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+}
+
+/**
+ * axienet_tx_irq - Tx Done Isr.
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * returns: IRQ_HANDLED for all cases.
+ *
+ * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
+ * to complete the BD processing.
+ */
+static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
+{
+ u32 cr;
+ unsigned int status;
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ axienet_start_xmit_done(lp->ndev);
+ goto out;
+ }
+ if (!(status & XAXIDMA_IRQ_ALL_MASK))
+		dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
+ dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+ (lp->tx_bd_v[lp->tx_bd_ci]).phys);
+
+ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Write to the Tx channel control register */
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+
+ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Write to the Rx channel control register */
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+
+ tasklet_schedule(&lp->dma_err_tasklet);
+ }
+out:
+ axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
+ return IRQ_HANDLED;
+}
+
+/**
+ * axienet_rx_irq - Rx Isr.
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * returns: IRQ_HANDLED for all cases.
+ *
+ * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
+ * processing.
+ */
+static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
+{
+ u32 cr;
+ unsigned int status;
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ axienet_recv(lp->ndev);
+ goto out;
+ }
+ if (!(status & XAXIDMA_IRQ_ALL_MASK))
+		dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
+ dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+ (lp->rx_bd_v[lp->rx_bd_ci]).phys);
+
+ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Finally write to the Tx channel control register */
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+
+ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* write to the Rx channel control register */
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+
+ tasklet_schedule(&lp->dma_err_tasklet);
+ }
+out:
+ axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
+ return IRQ_HANDLED;
+}
+
+/**
+ * axienet_open - Driver open routine.
+ * @ndev: Pointer to net_device structure
+ *
+ * returns: 0, on success.
+ * -ENODEV, if PHY cannot be connected to
+ * non-zero error value on failure
+ *
+ * This is the driver open routine. It calls phy_start to start the PHY device.
+ * It also allocates interrupt service routines, enables the interrupt lines
+ * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
+ * descriptors are initialized.
+ */
+static int axienet_open(struct net_device *ndev)
+{
+ int ret, mdio_mcreg;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ dev_dbg(&ndev->dev, "axienet_open()\n");
+
+ mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
+ ret = axienet_mdio_wait_until_ready(lp);
+ if (ret < 0)
+ return ret;
+ /* Disable the MDIO interface till Axi Ethernet Reset is completed.
+ * When we do an Axi Ethernet reset, it resets the complete core
+ * including the MDIO. If MDIO is not disabled when the reset
+ * process is started, MDIO will be broken afterwards. */
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET,
+ (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
+ axienet_device_reset(ndev);
+ /* Enable the MDIO */
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
+ ret = axienet_mdio_wait_until_ready(lp);
+ if (ret < 0)
+ return ret;
+
+ if (lp->phy_node) {
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ axienet_adjust_link, 0,
+ PHY_INTERFACE_MODE_GMII);
+ if (!lp->phy_dev) {
+ dev_err(lp->dev, "of_phy_connect() failed\n");
+ return -ENODEV;
+ }
+ phy_start(lp->phy_dev);
+ }
+
+ /* Enable interrupts for Axi DMA Tx */
+ ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
+ if (ret)
+ goto err_tx_irq;
+ /* Enable interrupts for Axi DMA Rx */
+ ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
+ if (ret)
+ goto err_rx_irq;
+ /* Enable tasklets for Axi DMA error handling */
+ tasklet_enable(&lp->dma_err_tasklet);
+ return 0;
+
+err_rx_irq:
+ free_irq(lp->tx_irq, ndev);
+err_tx_irq:
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ lp->phy_dev = NULL;
+ dev_err(lp->dev, "request_irq() failed\n");
+ return ret;
+}
+
+/**
+ * axienet_stop - Driver stop routine.
+ * @ndev: Pointer to net_device structure
+ *
+ * returns: 0, on success.
+ *
+ * This is the driver stop routine. It calls phy_disconnect to stop the PHY
+ * device. It also removes the interrupt handlers and disables the interrupts.
+ * The Axi DMA Tx/Rx BDs are released.
+ */
+static int axienet_stop(struct net_device *ndev)
+{
+ u32 cr;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ dev_dbg(&ndev->dev, "axienet_close()\n");
+
+ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
+ cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
+ cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+ axienet_setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+
+ tasklet_disable(&lp->dma_err_tasklet);
+
+ free_irq(lp->tx_irq, ndev);
+ free_irq(lp->rx_irq, ndev);
+
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ lp->phy_dev = NULL;
+
+ axienet_dma_bd_release(ndev);
+ return 0;
+}
+
+/**
+ * axienet_change_mtu - Driver change mtu routine.
+ * @ndev: Pointer to net_device structure
+ * @new_mtu: New mtu value to be applied
+ *
+ * returns: 0 on success, -EBUSY if the interface is running, or -EINVAL if
+ *	    the requested MTU is out of range.
+ *
+ * This is the change MTU driver routine. It checks if the Axi Ethernet
+ * hardware supports jumbo frames before changing the MTU. This can be
+ * called only when the device is not up.
+ */
+static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ if (netif_running(ndev))
+ return -EBUSY;
+ if (lp->jumbo_support) {
+ if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
+ return -EINVAL;
+ ndev->mtu = new_mtu;
+ } else {
+ if ((new_mtu > XAE_MTU) || (new_mtu < 64))
+ return -EINVAL;
+ ndev->mtu = new_mtu;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * axienet_poll_controller - Axi Ethernet poll mechanism.
+ * @ndev: Pointer to net_device structure
+ *
+ * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
+ * to polling the ISRs and are enabled back after the polling is done.
+ */
+static void axienet_poll_controller(struct net_device *ndev)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ disable_irq(lp->tx_irq);
+ disable_irq(lp->rx_irq);
+	axienet_rx_irq(lp->rx_irq, ndev);
+	axienet_tx_irq(lp->tx_irq, ndev);
+ enable_irq(lp->tx_irq);
+ enable_irq(lp->rx_irq);
+}
+#endif
+
+static const struct net_device_ops axienet_netdev_ops = {
+ .ndo_open = axienet_open,
+ .ndo_stop = axienet_stop,
+ .ndo_start_xmit = axienet_start_xmit,
+ .ndo_change_mtu = axienet_change_mtu,
+ .ndo_set_mac_address = netdev_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = axienet_set_multicast_list,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = axienet_poll_controller,
+#endif
+};
+
+/**
+ * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY.
+ * @ndev: Pointer to net_device structure
+ * @ecmd: Pointer to ethtool_cmd structure
+ *
+ * This implements ethtool command for getting PHY settings. If PHY could
+ * not be found, the function returns -ENODEV. This function calls the
+ * relevant PHY ethtool API to get the PHY settings.
+ * Issue "ethtool ethX" under linux prompt to execute this function.
+ */
+static int axienet_ethtools_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+ if (!phydev)
+ return -ENODEV;
+ return phy_ethtool_gset(phydev, ecmd);
+}
+
+/**
+ * axienet_ethtools_set_settings - Set PHY settings as passed in the argument.
+ * @ndev: Pointer to net_device structure
+ * @ecmd: Pointer to ethtool_cmd structure
+ *
+ * This implements ethtool command for setting various PHY settings. If PHY
+ * could not be found, the function returns -ENODEV. This function calls the
+ * relevant PHY ethtool API to set the PHY.
+ * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
+ * function.
+ */
+static int axienet_ethtools_set_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+ if (!phydev)
+ return -ENODEV;
+ return phy_ethtool_sset(phydev, ecmd);
+}
+
+/**
+ * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
+ * @ndev: Pointer to net_device structure
+ * @ed: Pointer to ethtool_drvinfo structure
+ *
+ * This implements ethtool command for getting the driver information.
+ * Issue "ethtool -i ethX" under linux prompt to execute this function.
+ */
+static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *ed)
+{
+ memset(ed, 0, sizeof(struct ethtool_drvinfo));
+ strcpy(ed->driver, DRIVER_NAME);
+ strcpy(ed->version, DRIVER_VERSION);
+ ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
+}
+
+/**
+ * axienet_ethtools_get_regs_len - Get the total regs length present in the
+ * AxiEthernet core.
+ * @ndev: Pointer to net_device structure
+ *
+ * This implements ethtool command for getting the total register length
+ * information.
+ */
+static int axienet_ethtools_get_regs_len(struct net_device *ndev)
+{
+ return sizeof(u32) * AXIENET_REGS_N;
+}
+
+/**
+ * axienet_ethtools_get_regs - Dump the contents of all registers present
+ * in AxiEthernet core.
+ * @ndev: Pointer to net_device structure
+ * @regs: Pointer to ethtool_regs structure
+ * @ret: Void pointer used to return the contents of the registers.
+ *
+ * This implements ethtool command for getting the Axi Ethernet register dump.
+ * Issue "ethtool -d ethX" to execute this function.
+ */
+static void axienet_ethtools_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *ret)
+{
+ u32 *data = (u32 *) ret;
+ size_t len = sizeof(u32) * AXIENET_REGS_N;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ regs->version = 0;
+ regs->len = len;
+
+ memset(data, 0, len);
+ data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
+ data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
+ data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
+ data[3] = axienet_ior(lp, XAE_IS_OFFSET);
+ data[4] = axienet_ior(lp, XAE_IP_OFFSET);
+ data[5] = axienet_ior(lp, XAE_IE_OFFSET);
+ data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
+ data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
+ data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
+ data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
+ data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
+ data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
+ data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
+ data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
+ data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
+ data[15] = axienet_ior(lp, XAE_TC_OFFSET);
+ data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
+ data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
+ data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
+ data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
+ data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
+ data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
+ data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
+ data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
+ data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
+ data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
+ data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
+ data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
+ data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
+ data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
+ data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
+ data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
+}
+
+/**
+ * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
+ * Tx and Rx paths.
+ * @ndev: Pointer to net_device structure
+ * @epauseparm: Pointer to ethtool_pauseparam structure.
+ *
+ * This implements ethtool command for getting axi ethernet pause frame
+ * setting. Issue "ethtool -a ethX" to execute this function.
+ */
+static void
+axienet_ethtools_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *epauseparm)
+{
+ u32 regval;
+ struct axienet_local *lp = netdev_priv(ndev);
+ epauseparm->autoneg = 0;
+ regval = axienet_ior(lp, XAE_FCC_OFFSET);
+ epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
+ epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
+}
+
+/**
+ * axienet_ethtools_set_pauseparam - Set device pause parameter (flow control)
+ *				      settings.
+ * @ndev:	Pointer to net_device structure
+ * @epauseparm:	Pointer to ethtool_pauseparam structure
+ *
+ * This implements ethtool command for enabling flow control on Rx and Tx
+ * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
+ * function.
+ */
+static int
+axienet_ethtools_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *epauseparm)
+{
+ u32 regval = 0;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ printk(KERN_ERR "%s: Please stop netif before applying "
+				"configuration\n", ndev->name);
+ return -EFAULT;
+ }
+
+ regval = axienet_ior(lp, XAE_FCC_OFFSET);
+ if (epauseparm->tx_pause)
+ regval |= XAE_FCC_FCTX_MASK;
+ else
+ regval &= ~XAE_FCC_FCTX_MASK;
+ if (epauseparm->rx_pause)
+ regval |= XAE_FCC_FCRX_MASK;
+ else
+ regval &= ~XAE_FCC_FCRX_MASK;
+ axienet_iow(lp, XAE_FCC_OFFSET, regval);
+
+ return 0;
+}
+
+/**
+ * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
+ * @ndev: Pointer to net_device structure
+ * @ecoalesce: Pointer to ethtool_coalesce structure
+ *
+ * This implements ethtool command for getting the DMA interrupt coalescing
+ * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
+ * execute this function.
+ */
+static int axienet_ethtools_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ecoalesce)
+{
+ u32 regval = 0;
+ struct axienet_local *lp = netdev_priv(ndev);
+ regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
+ >> XAXIDMA_COALESCE_SHIFT;
+ regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
+ >> XAXIDMA_COALESCE_SHIFT;
+ return 0;
+}
+
+/**
+ * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
+ * @ndev: Pointer to net_device structure
+ * @ecoalesce: Pointer to ethtool_coalesce structure
+ *
+ * This implements ethtool command for setting the DMA interrupt coalescing
+ * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
+ * prompt to execute this function.
+ */
+static int axienet_ethtools_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ecoalesce)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ printk(KERN_ERR "%s: Please stop netif before applying "
+				"configuration\n", ndev->name);
+ return -EFAULT;
+ }
+
+ if ((ecoalesce->rx_coalesce_usecs) ||
+ (ecoalesce->rx_coalesce_usecs_irq) ||
+ (ecoalesce->rx_max_coalesced_frames_irq) ||
+ (ecoalesce->tx_coalesce_usecs) ||
+ (ecoalesce->tx_coalesce_usecs_irq) ||
+ (ecoalesce->tx_max_coalesced_frames_irq) ||
+ (ecoalesce->stats_block_coalesce_usecs) ||
+ (ecoalesce->use_adaptive_rx_coalesce) ||
+ (ecoalesce->use_adaptive_tx_coalesce) ||
+ (ecoalesce->pkt_rate_low) ||
+ (ecoalesce->rx_coalesce_usecs_low) ||
+ (ecoalesce->rx_max_coalesced_frames_low) ||
+ (ecoalesce->tx_coalesce_usecs_low) ||
+ (ecoalesce->tx_max_coalesced_frames_low) ||
+ (ecoalesce->pkt_rate_high) ||
+ (ecoalesce->rx_coalesce_usecs_high) ||
+ (ecoalesce->rx_max_coalesced_frames_high) ||
+ (ecoalesce->tx_coalesce_usecs_high) ||
+ (ecoalesce->tx_max_coalesced_frames_high) ||
+ (ecoalesce->rate_sample_interval))
+ return -EOPNOTSUPP;
+ if (ecoalesce->rx_max_coalesced_frames)
+ lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
+ if (ecoalesce->tx_max_coalesced_frames)
+ lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
+
+ return 0;
+}
+
+static struct ethtool_ops axienet_ethtool_ops = {
+ .get_settings = axienet_ethtools_get_settings,
+ .set_settings = axienet_ethtools_set_settings,
+ .get_drvinfo = axienet_ethtools_get_drvinfo,
+ .get_regs_len = axienet_ethtools_get_regs_len,
+ .get_regs = axienet_ethtools_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = axienet_ethtools_get_pauseparam,
+ .set_pauseparam = axienet_ethtools_set_pauseparam,
+ .get_coalesce = axienet_ethtools_get_coalesce,
+ .set_coalesce = axienet_ethtools_set_coalesce,
+};
+
+/**
+ * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
+ * @data: Data passed
+ *
+ * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
+ * Tx/Rx BDs.
+ */
+static void axienet_dma_err_handler(unsigned long data)
+{
+ u32 axienet_status;
+ u32 cr, i;
+ int mdio_mcreg;
+ struct axienet_local *lp = (struct axienet_local *) data;
+ struct net_device *ndev = lp->ndev;
+ struct axidma_bd *cur_p;
+
+ axienet_setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
+ axienet_mdio_wait_until_ready(lp);
+ /* Disable the MDIO interface till Axi Ethernet Reset is completed.
+ * When we do an Axi Ethernet reset, it resets the complete core
+ * including the MDIO. So if MDIO is not disabled when the reset
+ * process is started, MDIO will be broken afterwards. */
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
+ ~XAE_MDIO_MC_MDIOEN_MASK));
+
+ __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
+ __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
+
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
+ axienet_mdio_wait_until_ready(lp);
+
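+	/* Release any skbs still attached to the Tx ring and wipe all
+	 * descriptor fields before the rings are reprogrammed.
+	 */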
+ for (i = 0; i < TX_BD_NUM; i++) {
+ cur_p = &lp->tx_bd_v[i];
+ if (cur_p->phys)
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ (cur_p->cntrl &
+ XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+ if (cur_p->app4)
+ dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
+ cur_p->phys = 0;
+ cur_p->cntrl = 0;
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ cur_p->sw_id_offset = 0;
+ }
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ cur_p = &lp->rx_bd_v[i];
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ }
+
+ lp->tx_bd_ci = 0;
+ lp->tx_bd_tail = 0;
+ lp->rx_bd_ci = 0;
+
+ /* Start updating the Rx channel control register */
+ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
+ (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XAXIDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Finally write to the Rx channel control register */
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+
+ /* Start updating the Tx channel control register */
+ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
+ (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Finally write to the Tx channel control register */
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.*/
+ axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting */
+ axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+
+ axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
+ axienet_status &= ~XAE_RCW1_RX_MASK;
+ axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
+
+ axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
+ if (axienet_status & XAE_INT_RXRJECT_MASK)
+ axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+
+ /* Sync default options with HW but leave receiver and
+ * transmitter disabled.*/
+ axienet_setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ axienet_set_mac_address(ndev, NULL);
+ axienet_set_multicast_list(ndev);
+ axienet_setoptions(ndev, lp->options);
+}
+
+/**
+ * axienet_of_probe - Axi Ethernet probe function.
+ * @op:		Pointer to platform device structure.
+ *
+ * returns: 0, on success
+ * Non-zero error value on failure.
+ *
+ * This is the probe routine for Axi Ethernet driver. This is called before
+ * any other driver routines are invoked. It allocates and sets up the Ethernet
+ * device, parses the device tree to populate the fields of axienet_local,
+ * and registers the Ethernet device.
+ */
+static int __devinit axienet_of_probe(struct platform_device *op)
+{
+ __be32 *p;
+ int size, ret = 0;
+ struct device_node *np;
+ struct axienet_local *lp;
+ struct net_device *ndev;
+ const void *addr;
+
+ ndev = alloc_etherdev(sizeof(*lp));
+ if (!ndev)
+ return -ENOMEM;
+
+ ether_setup(ndev);
+ dev_set_drvdata(&op->dev, ndev);
+
+ SET_NETDEV_DEV(ndev, &op->dev);
+ ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
+ ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
+ ndev->netdev_ops = &axienet_netdev_ops;
+ ndev->ethtool_ops = &axienet_ethtool_ops;
+
+ lp = netdev_priv(ndev);
+ lp->ndev = ndev;
+ lp->dev = &op->dev;
+ lp->options = XAE_OPTION_DEFAULTS;
+ /* Map device registers */
+ lp->regs = of_iomap(op->dev.of_node, 0);
+ if (!lp->regs) {
+ dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
+ goto nodev;
+ }
+ /* Setup checksum offload, but default to off if not specified */
+ lp->features = 0;
+
+ p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
+ if (p) {
+ switch (be32_to_cpup(p)) {
+ case 1:
+ lp->csum_offload_on_tx_path =
+ XAE_FEATURE_PARTIAL_TX_CSUM;
+ lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
+ /* Can checksum TCP/UDP over IPv4. */
+ ndev->features |= NETIF_F_IP_CSUM;
+ break;
+ case 2:
+ lp->csum_offload_on_tx_path =
+ XAE_FEATURE_FULL_TX_CSUM;
+ lp->features |= XAE_FEATURE_FULL_TX_CSUM;
+ /* Can checksum TCP/UDP over IPv4. */
+ ndev->features |= NETIF_F_IP_CSUM;
+ break;
+ default:
+ lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
+ }
+ }
+ p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
+ if (p) {
+ switch (be32_to_cpup(p)) {
+ case 1:
+ lp->csum_offload_on_rx_path =
+ XAE_FEATURE_PARTIAL_RX_CSUM;
+ lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
+ break;
+ case 2:
+ lp->csum_offload_on_rx_path =
+ XAE_FEATURE_FULL_RX_CSUM;
+ lp->features |= XAE_FEATURE_FULL_RX_CSUM;
+ break;
+ default:
+ lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
+ }
+ }
+ /* For supporting jumbo frames, the Axi Ethernet hardware must have
+ * a larger Rx/Tx Memory. Typically, the size must be more than or
+ * equal to 16384 bytes, so that we can enable jumbo option and start
+ * supporting jumbo frames. Here we check for memory allocated for
+ * Rx/Tx in the hardware from the device-tree and accordingly set
+ * flags. */
+ p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
+ if (p) {
+ if ((be32_to_cpup(p)) >= 0x4000)
+ lp->jumbo_support = 1;
+ }
+ p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
+ NULL);
+ if (p)
+ lp->temac_type = be32_to_cpup(p);
+ p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
+ if (p)
+ lp->phy_type = be32_to_cpup(p);
+
+ /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
+ np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
+ if (!np) {
+ dev_err(&op->dev, "could not find DMA node\n");
+ goto err_iounmap;
+ }
+ lp->dma_regs = of_iomap(np, 0);
+ if (lp->dma_regs) {
+ dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
+ } else {
+ dev_err(&op->dev, "unable to map DMA registers\n");
+ of_node_put(np);
+ }
+ lp->rx_irq = irq_of_parse_and_map(np, 1);
+ lp->tx_irq = irq_of_parse_and_map(np, 0);
+ of_node_put(np);
+ if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
+ dev_err(&op->dev, "could not determine irqs\n");
+ ret = -ENOMEM;
+ goto err_iounmap_2;
+ }
+
+ /* Retrieve the MAC address */
+ addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
+ if ((!addr) || (size != 6)) {
+ dev_err(&op->dev, "could not find MAC address\n");
+ ret = -ENODEV;
+ goto err_iounmap_2;
+ }
+ axienet_set_mac_address(ndev, (void *) addr);
+
+ lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
+ lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+
+ lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
+ ret = axienet_mdio_setup(lp, op->dev.of_node);
+ if (ret)
+ dev_warn(&op->dev, "error registering MDIO bus\n");
+
+ ret = register_netdev(lp->ndev);
+ if (ret) {
+ dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
+ goto err_iounmap_2;
+ }
+
+ tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
+ (unsigned long) lp);
+ tasklet_disable(&lp->dma_err_tasklet);
+
+ return 0;
+
+err_iounmap_2:
+ if (lp->dma_regs)
+ iounmap(lp->dma_regs);
+err_iounmap:
+ iounmap(lp->regs);
+nodev:
+ free_netdev(ndev);
+ ndev = NULL;
+ return ret;
+}
+
+static int __devexit axienet_of_remove(struct platform_device *op)
+{
+ struct net_device *ndev = dev_get_drvdata(&op->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ axienet_mdio_teardown(lp);
+ unregister_netdev(ndev);
+
+ if (lp->phy_node)
+ of_node_put(lp->phy_node);
+ lp->phy_node = NULL;
+
+ dev_set_drvdata(&op->dev, NULL);
+
+ iounmap(lp->regs);
+ if (lp->dma_regs)
+ iounmap(lp->dma_regs);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static struct platform_driver axienet_of_driver = {
+ .probe = axienet_of_probe,
+ .remove = __devexit_p(axienet_of_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "xilinx_axienet",
+ .of_match_table = axienet_of_match,
+ },
+};
+
+module_platform_driver(axienet_of_driver);
+
+MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
+MODULE_AUTHOR("Xilinx");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
new file mode 100644
index 000000000000..d70b6e79f6c0
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -0,0 +1,238 @@
+/*
+ * MDIO bus driver for the Xilinx Axi Ethernet device
+ *
+ * Copyright (c) 2009 Secret Lab Technologies, Ltd.
+ * Copyright (c) 2010 Xilinx, Inc. All rights reserved.
+ * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
+ * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
+ */
+
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/jiffies.h>
+
+#include "xilinx_axienet.h"
+
+#define MAX_MDIO_FREQ 2500000 /* 2.5 MHz */
+#define DEFAULT_CLOCK_DIVISOR XAE_MDIO_DIV_DFT
+
+/* Wait until the MDIO interface is ready to accept a new transaction. */
+int axienet_mdio_wait_until_ready(struct axienet_local *lp)
+{
+ long end = jiffies + 2;
+ while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) &
+ XAE_MDIO_MCR_READY_MASK)) {
+ if (end - jiffies <= 0) {
+ WARN_ON(1);
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ }
+ return 0;
+}
+
+/**
+ * axienet_mdio_read - MDIO interface read function
+ * @bus: Pointer to mii bus structure
+ * @phy_id: Address of the PHY device
+ * @reg: PHY register to read
+ *
+ * returns: The register contents on success, -ETIMEDOUT on a timeout
+ *
+ * Reads the contents of the requested register from the requested PHY
+ * address by first writing the details into the MCR register and then,
+ * once the interface reports ready, reading the MRD register to obtain
+ * the PHY register contents.
+ */
+static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ u32 rc;
+ int ret;
+ struct axienet_local *lp = bus->priv;
+
+ ret = axienet_mdio_wait_until_ready(lp);
+ if (ret < 0)
+ return ret;
+
+ axienet_iow(lp, XAE_MDIO_MCR_OFFSET,
+ (((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) &
+ XAE_MDIO_MCR_PHYAD_MASK) |
+ ((reg << XAE_MDIO_MCR_REGAD_SHIFT) &
+ XAE_MDIO_MCR_REGAD_MASK) |
+ XAE_MDIO_MCR_INITIATE_MASK |
+ XAE_MDIO_MCR_OP_READ_MASK));
+
+ ret = axienet_mdio_wait_until_ready(lp);
+ if (ret < 0)
+ return ret;
+
+ rc = axienet_ior(lp, XAE_MDIO_MRD_OFFSET) & 0x0000FFFF;
+
+ dev_dbg(lp->dev, "axienet_mdio_read(phy_id=%i, reg=%x) == %x\n",
+ phy_id, reg, rc);
+
+ return rc;
+}
+
+/**
+ * axienet_mdio_write - MDIO interface write function
+ * @bus: Pointer to mii bus structure
+ * @phy_id: Address of the PHY device
+ * @reg: PHY register to write to
+ * @val: Value to be written into the register
+ *
+ * returns: 0 on success, -ETIMEDOUT on a timeout
+ *
+ * Writes the value to the requested register by first writing the value
+ * into the MWD register. The MCR register is then set up appropriately
+ * to finish the write operation.
+ */
+static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
+ u16 val)
+{
+ int ret;
+ struct axienet_local *lp = bus->priv;
+
+ dev_dbg(lp->dev, "axienet_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
+ phy_id, reg, val);
+
+ ret = axienet_mdio_wait_until_ready(lp);
+ if (ret < 0)
+ return ret;
+
+ axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32) val);
+ axienet_iow(lp, XAE_MDIO_MCR_OFFSET,
+ (((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) &
+ XAE_MDIO_MCR_PHYAD_MASK) |
+ ((reg << XAE_MDIO_MCR_REGAD_SHIFT) &
+ XAE_MDIO_MCR_REGAD_MASK) |
+ XAE_MDIO_MCR_INITIATE_MASK |
+ XAE_MDIO_MCR_OP_WRITE_MASK));
+
+ ret = axienet_mdio_wait_until_ready(lp);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * axienet_mdio_setup - MDIO setup function
+ * @lp: Pointer to axienet local data structure.
+ * @np: Pointer to device node
+ *
+ * returns: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
+ * mdiobus_alloc (to allocate memory for mii bus structure) fails.
+ *
+ * Sets up the MDIO interface by initializing the MDIO clock and enabling the
+ * MDIO interface in hardware. Registers the MDIO interface.
+ **/
+int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
+{
+ int ret;
+ u32 clk_div, host_clock;
+ u32 *property_p;
+ struct mii_bus *bus;
+ struct resource res;
+ struct device_node *np1;
+
+ /* clk_div can be calculated by deriving it from the equation:
+ * fMDIO = fHOST / ((1 + clk_div) * 2)
+ *
+ * Where fMDIO <= 2500000, so we get:
+ * fHOST / ((1 + clk_div) * 2) <= 2500000
+ *
+ * Then we get:
+ * 1 / ((1 + clk_div) * 2) <= (2500000 / fHOST)
+ *
+ * Then we get:
+ * 1 / (1 + clk_div) <= ((2500000 * 2) / fHOST)
+ *
+ * Then we get:
+ * 1 / (1 + clk_div) <= (5000000 / fHOST)
+ *
+ * So:
+ * (1 + clk_div) >= (fHOST / 5000000)
+ *
+ * And finally:
+ * clk_div >= (fHOST / 5000000) - 1
+ *
+ * fHOST can be read from the flattened device tree as property
+ * "clock-frequency" from the CPU
+ */
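+ /* Worked example (hypothetical host clock, not read from any device
+ * tree): with fHOST = 100000000 (100 MHz),
+ * clk_div >= (100000000 / 5000000) - 1 = 19. The division leaves no
+ * remainder, so clk_div = 19 and
+ * fMDIO = 100000000 / ((1 + 19) * 2) = 2500000, i.e. exactly
+ * MAX_MDIO_FREQ.
+ */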
+
+ np1 = of_find_node_by_name(NULL, "cpu");
+ if (!np1) {
+ printk(KERN_WARNING "%s(): Could not find CPU device node.",
+ __func__);
+ printk(KERN_WARNING "Setting MDIO clock divisor to "
+ "default %d\n", DEFAULT_CLOCK_DIVISOR);
+ clk_div = DEFAULT_CLOCK_DIVISOR;
+ goto issue;
+ }
+ property_p = (u32 *) of_get_property(np1, "clock-frequency", NULL);
+ if (!property_p) {
+ printk(KERN_WARNING "%s(): Could not find CPU property: "
+ "clock-frequency.", __func__);
+ printk(KERN_WARNING "Setting MDIO clock divisor to "
+ "default %d\n", DEFAULT_CLOCK_DIVISOR);
+ clk_div = DEFAULT_CLOCK_DIVISOR;
+ goto issue;
+ }
+
+ host_clock = be32_to_cpup(property_p);
+ clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
+ /* If there is any remainder from the division of
+ * fHOST / (MAX_MDIO_FREQ * 2), then we need to add
+ * 1 to the clock divisor or we will surely be above 2.5 MHz */
+ if (host_clock % (MAX_MDIO_FREQ * 2))
+ clk_div++;
+
+ printk(KERN_DEBUG "%s(): Setting MDIO clock divisor to %u based "
+ "on %u Hz host clock.\n", __func__, clk_div, host_clock);
+
+ of_node_put(np1);
+issue:
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET,
+ (((u32) clk_div) | XAE_MDIO_MC_MDIOEN_MASK));
+
+ ret = axienet_mdio_wait_until_ready(lp);
+ if (ret < 0)
+ return ret;
+
+ bus = mdiobus_alloc();
+ if (!bus)
+ return -ENOMEM;
+
+ np1 = of_get_parent(lp->phy_node);
+ of_address_to_resource(np1, 0, &res);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ (unsigned long long) res.start);
+
+ bus->priv = lp;
+ bus->name = "Xilinx Axi Ethernet MDIO";
+ bus->read = axienet_mdio_read;
+ bus->write = axienet_mdio_write;
+ bus->parent = lp->dev;
+ bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
+ lp->mii_bus = bus;
+
+ ret = of_mdiobus_register(bus, np1);
+ if (ret) {
+ mdiobus_free(bus);
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * axienet_mdio_teardown - MDIO remove function
+ * @lp: Pointer to axienet local data structure.
+ *
+ * Unregisters the MDIO bus and frees any associated memory for the mii bus.
+ */
+void axienet_mdio_teardown(struct axienet_local *lp)
+{
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
+ lp->mii_bus = NULL;
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 79013e5731a5..77cfe5110318 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -613,7 +613,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
u32 len;
len = ETH_FRAME_LEN + ETH_FCS_LEN;
- skb = dev_alloc_skb(len + ALIGNMENT);
+ skb = netdev_alloc_skb(dev, len + ALIGNMENT);
if (!skb) {
/* Couldn't get memory. */
dev->stats.rx_dropped++;
@@ -1136,10 +1136,8 @@ static int __devinit xemaclite_of_probe(struct platform_device *ofdev)
/* Create an ethernet device instance */
ndev = alloc_etherdev(sizeof(struct net_local));
- if (!ndev) {
- dev_err(dev, "Could not allocate network device\n");
+ if (!ndev)
return -ENOMEM;
- }
dev_set_drvdata(dev, ndev);
SET_NETDEV_DEV(ndev, &ofdev->dev);
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 33979c3ac943..5c69c6f93fb8 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1039,7 +1039,8 @@ xirc2ps_interrupt(int irq, void *dev_id)
pr_debug("rsr=%#02x packet_length=%u\n", rsr, pktlen);
- skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */
+ /* 1 extra so we can use insw */
+ skb = netdev_alloc_skb(dev, pktlen + 3);
if (!skb) {
pr_notice("low memory, packet dropped (size=%u)\n", pktlen);
dev->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.c b/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
index e122493ab70e..45008377c8bf 100644
--- a/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
+++ b/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
@@ -398,11 +398,8 @@ int ixpdev_init(int __nds_count, struct net_device **__nds,
}
for (i = 0; i < nds_count; i++) {
- printk(KERN_INFO "%s: IXP2000 MSF ethernet (port %d), "
- "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", nds[i]->name, i,
- nds[i]->dev_addr[0], nds[i]->dev_addr[1],
- nds[i]->dev_addr[2], nds[i]->dev_addr[3],
- nds[i]->dev_addr[4], nds[i]->dev_addr[5]);
+ printk(KERN_INFO "%s: IXP2000 MSF ethernet (port %d), %pM.\n",
+ nds[i]->name, i, nds[i]->dev_addr);
}
return 0;
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 9537aaa50c2f..49b8b58fc5c6 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -1162,7 +1162,7 @@ static void baycom_probe(struct net_device *dev)
/*
* command line settable parameters
*/
-static const char *mode[NR_PORTS] = { "", };
+static char *mode[NR_PORTS] = { "", };
static int iobase[NR_PORTS] = { 0x378, };
module_param_array(mode, charp, NULL, 0);
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index 279d2296290a..f1aea0c98333 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -477,7 +477,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
/*
* command line settable parameters
*/
-static const char *mode[NR_PORTS] = { "picpar", };
+static char *mode[NR_PORTS] = { "picpar", };
static int iobase[NR_PORTS] = { 0x378, };
module_param_array(mode, charp, NULL, 0);
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 96a98d2ff151..696327773fbe 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -403,7 +403,6 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate,
/* Allocate a new mcs */
if ((p = kmalloc(sizeof(struct yam_mcs), GFP_KERNEL)) == NULL) {
- printk(KERN_WARNING "YAM: no memory to allocate mcs\n");
release_firmware(fw);
return NULL;
}
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index e68c941926f1..2a51363d9fed 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1600,12 +1600,8 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
- if (!image){
- printk(KERN_ERR "%s: Unable to allocate memory "
- "for EEPROM image\n", dev->name);
+ if (!image)
return -ENOMEM;
- }
-
if (rrpriv->fw_running){
printk("%s: Firmware already running\n", dev->name);
@@ -1637,8 +1633,6 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
oldimage = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
if (!image || !oldimage) {
- printk(KERN_ERR "%s: Unable to allocate memory "
- "for EEPROM image\n", dev->name);
error = -ENOMEM;
goto wf_out;
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index dec5836ae075..c35824552792 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -49,6 +49,7 @@ struct hv_netvsc_packet {
struct hv_device *device;
bool is_data_pkt;
+ u16 vlan_tci;
/*
* Valid only for receives when we break a xfer page packet
@@ -926,9 +927,40 @@ struct rndis_oobd {
struct rndis_per_packet_info {
u32 size;
u32 type;
- u32 per_pkt_info_offset;
+ u32 ppi_offset;
+};
+
+enum ndis_per_pkt_info_type {
+ TCPIP_CHKSUM_PKTINFO,
+ IPSEC_PKTINFO,
+ TCP_LARGESEND_PKTINFO,
+ CLASSIFICATION_HANDLE_PKTINFO,
+ NDIS_RESERVED,
+ SG_LIST_PKTINFO,
+ IEEE_8021Q_INFO,
+ ORIGINAL_PKTINFO,
+ PACKET_CANCEL_ID,
+ ORIGINAL_NET_BUFLIST,
+ CACHED_NET_BUFLIST,
+ SHORT_PKT_PADINFO,
+ MAX_PER_PKT_INFO
+};
+
+struct ndis_pkt_8021q_info {
+ union {
+ struct {
+ u32 pri:3; /* User Priority */
+ u32 cfi:1; /* Canonical Format ID */
+ u32 vlanid:12; /* VLAN ID */
+ u32 reserved:16;
+ };
+ u32 value;
+ };
};
+#define NDIS_VLAN_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+ sizeof(struct ndis_pkt_8021q_info))
+
/* Format of Information buffer passed in a SetRequest for the OID */
/* OID_GEN_RNDIS_CONFIG_PARAMETER. */
struct rndis_config_parameter_info {
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 8965b45ce5a5..d025c83cd12a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -300,6 +300,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu;
+ init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
@@ -341,7 +342,7 @@ static int netvsc_connect_vsp(struct hv_device *device)
/* Send the ndis version */
memset(init_packet, 0, sizeof(struct nvsp_message));
- ndis_version = 0x00050000;
+ ndis_version = 0x00050001;
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
init_packet->msg.v1_msg.
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 610860f28968..dd294783b5c5 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -159,7 +159,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
/* Allocate a netvsc packet based on # of frags. */
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
(num_pages * sizeof(struct hv_page_buffer)) +
- sizeof(struct rndis_filter_packet), GFP_ATOMIC);
+ sizeof(struct rndis_filter_packet) +
+ NDIS_VLAN_PPI_SIZE, GFP_ATOMIC);
if (!packet) {
/* out of memory, drop packet */
netdev_err(net, "unable to allocate hv_netvsc_packet\n");
@@ -169,6 +170,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
return NETDEV_TX_OK;
}
+ packet->vlan_tci = skb->vlan_tci;
+
packet->extension = (void *)(unsigned long)packet +
sizeof(struct hv_netvsc_packet) +
(num_pages * sizeof(struct hv_page_buffer));
@@ -220,13 +223,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
net->stats.tx_bytes += skb->len;
net->stats.tx_packets++;
} else {
- /* we are shutting down or bus overloaded, just drop packet */
- net->stats.tx_dropped++;
kfree(packet);
- dev_kfree_skb_any(skb);
}
- return NETDEV_TX_OK;
+ return ret ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
/*
@@ -267,13 +267,10 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
int netvsc_recv_callback(struct hv_device *device_obj,
struct hv_netvsc_packet *packet)
{
- struct net_device *net = dev_get_drvdata(&device_obj->device);
+ struct net_device *net;
struct sk_buff *skb;
- struct netvsc_device *net_device;
-
- net_device = hv_get_drvdata(device_obj);
- net = net_device->ndev;
+ net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
if (!net) {
netdev_err(net, "got receive callback but net device"
" not initialized yet\n");
@@ -296,6 +293,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
skb->protocol = eth_type_trans(skb, net);
skb->ip_summed = CHECKSUM_NONE;
+ skb->vlan_tci = packet->vlan_tci;
net->stats.rx_packets++;
net->stats.rx_bytes += packet->total_data_buflen;
@@ -410,7 +408,7 @@ static int netvsc_probe(struct hv_device *dev,
/* TODO: Add GSO and Checksum offload */
net->hw_features = NETIF_F_SG;
- net->features = NETIF_F_SG;
+ net->features = NETIF_F_SG | NETIF_F_HW_VLAN_TX;
SET_ETHTOOL_OPS(net, &ethtool_ops);
SET_NETDEV_DEV(net, &dev->device);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 133b7fbf8595..d6be64bcefd4 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -26,6 +26,7 @@
#include <linux/io.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
#include "hyperv_net.h"
@@ -303,12 +304,39 @@ static void rndis_filter_receive_indicate_status(struct rndis_device *dev,
}
}
+/*
+ * Get the Per-Packet-Info with the specified type
+ * return NULL if not found.
+ */
+static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
+{
+ struct rndis_per_packet_info *ppi;
+ int len;
+
+ if (rpkt->per_pkt_info_offset == 0)
+ return NULL;
+
+ ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
+ rpkt->per_pkt_info_offset);
+ len = rpkt->per_pkt_info_len;
+
+ while (len > 0) {
+ if (ppi->type == type)
+ return (void *)((ulong)ppi + ppi->ppi_offset);
+ len -= ppi->size;
+ ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
+ }
+
+ return NULL;
+}
+
static void rndis_filter_receive_data(struct rndis_device *dev,
struct rndis_message *msg,
struct hv_netvsc_packet *pkt)
{
struct rndis_packet *rndis_pkt;
u32 data_offset;
+ struct ndis_pkt_8021q_info *vlan;
rndis_pkt = &msg->msg.pkt;
@@ -344,6 +372,14 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
pkt->is_data_pkt = true;
+ vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
+ if (vlan) {
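+ /* Rebuild the 802.1Q TCI: VID in bits 11:0, priority in bits
+ * 15:13 (VLAN_PRIO_SHIFT); VLAN_TAG_PRESENT marks the tag as
+ * present. */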
+ pkt->vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid |
+ (vlan->pri << VLAN_PRIO_SHIFT);
+ } else {
+ pkt->vlan_tci = 0;
+ }
+
netvsc_recv_callback(dev->net_dev->dev, pkt);
}
@@ -352,8 +388,7 @@ int rndis_filter_receive(struct hv_device *dev,
{
struct netvsc_device *net_dev = hv_get_drvdata(dev);
struct rndis_device *rndis_dev;
- struct rndis_message rndis_msg;
- struct rndis_message *rndis_hdr;
+ struct rndis_message *rndis_msg;
struct net_device *ndev;
if (!net_dev)
@@ -375,46 +410,32 @@ int rndis_filter_receive(struct hv_device *dev,
return -ENODEV;
}
- rndis_hdr = pkt->data;
-
- /* Make sure we got a valid rndis message */
- if ((rndis_hdr->ndis_msg_type != REMOTE_NDIS_PACKET_MSG) &&
- (rndis_hdr->msg_len > sizeof(struct rndis_message))) {
- netdev_err(ndev, "incoming rndis message buffer overflow "
- "detected (got %u, max %zu)..marking it an error!\n",
- rndis_hdr->msg_len,
- sizeof(struct rndis_message));
- }
+ rndis_msg = pkt->data;
- memcpy(&rndis_msg, rndis_hdr,
- (rndis_hdr->msg_len > sizeof(struct rndis_message)) ?
- sizeof(struct rndis_message) :
- rndis_hdr->msg_len);
+ dump_rndis_message(dev, rndis_msg);
- dump_rndis_message(dev, &rndis_msg);
-
- switch (rndis_msg.ndis_msg_type) {
+ switch (rndis_msg->ndis_msg_type) {
case REMOTE_NDIS_PACKET_MSG:
/* data msg */
- rndis_filter_receive_data(rndis_dev, &rndis_msg, pkt);
+ rndis_filter_receive_data(rndis_dev, rndis_msg, pkt);
break;
case REMOTE_NDIS_INITIALIZE_CMPLT:
case REMOTE_NDIS_QUERY_CMPLT:
case REMOTE_NDIS_SET_CMPLT:
/* completion msgs */
- rndis_filter_receive_response(rndis_dev, &rndis_msg);
+ rndis_filter_receive_response(rndis_dev, rndis_msg);
break;
case REMOTE_NDIS_INDICATE_STATUS_MSG:
/* notification msgs */
- rndis_filter_receive_indicate_status(rndis_dev, &rndis_msg);
+ rndis_filter_receive_indicate_status(rndis_dev, rndis_msg);
break;
default:
netdev_err(ndev,
"unhandled rndis message (type %u len %u)\n",
- rndis_msg.ndis_msg_type,
- rndis_msg.msg_len);
+ rndis_msg->ndis_msg_type,
+ rndis_msg->msg_len);
break;
}
@@ -758,66 +779,88 @@ int rndis_filter_open(struct hv_device *dev)
int rndis_filter_close(struct hv_device *dev)
{
- struct netvsc_device *netDevice = hv_get_drvdata(dev);
+ struct netvsc_device *nvdev = hv_get_drvdata(dev);
- if (!netDevice)
+ if (!nvdev)
return -EINVAL;
- return rndis_filter_close_device(netDevice->extension);
+ return rndis_filter_close_device(nvdev->extension);
}
int rndis_filter_send(struct hv_device *dev,
struct hv_netvsc_packet *pkt)
{
int ret;
- struct rndis_filter_packet *filterPacket;
- struct rndis_message *rndisMessage;
- struct rndis_packet *rndisPacket;
- u32 rndisMessageSize;
+ struct rndis_filter_packet *filter_pkt;
+ struct rndis_message *rndis_msg;
+ struct rndis_packet *rndis_pkt;
+ u32 rndis_msg_size;
+ bool isvlan = pkt->vlan_tci & VLAN_TAG_PRESENT;
/* Add the rndis header */
- filterPacket = (struct rndis_filter_packet *)pkt->extension;
-
- memset(filterPacket, 0, sizeof(struct rndis_filter_packet));
-
- rndisMessage = &filterPacket->msg;
- rndisMessageSize = RNDIS_MESSAGE_SIZE(struct rndis_packet);
-
- rndisMessage->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;
- rndisMessage->msg_len = pkt->total_data_buflen +
- rndisMessageSize;
-
- rndisPacket = &rndisMessage->msg.pkt;
- rndisPacket->data_offset = sizeof(struct rndis_packet);
- rndisPacket->data_len = pkt->total_data_buflen;
+ filter_pkt = (struct rndis_filter_packet *)pkt->extension;
+
+ rndis_msg = &filter_pkt->msg;
+ rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
+ if (isvlan)
+ rndis_msg_size += NDIS_VLAN_PPI_SIZE;
+
+ rndis_msg->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;
+ rndis_msg->msg_len = pkt->total_data_buflen +
+ rndis_msg_size;
+
+ rndis_pkt = &rndis_msg->msg.pkt;
+ rndis_pkt->data_offset = sizeof(struct rndis_packet);
+ if (isvlan)
+ rndis_pkt->data_offset += NDIS_VLAN_PPI_SIZE;
+ rndis_pkt->data_len = pkt->total_data_buflen;
+
+ if (isvlan) {
+ struct rndis_per_packet_info *ppi;
+ struct ndis_pkt_8021q_info *vlan;
+
+ rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
+ rndis_pkt->per_pkt_info_len = NDIS_VLAN_PPI_SIZE;
+
+ ppi = (struct rndis_per_packet_info *)((ulong)rndis_pkt +
+ rndis_pkt->per_pkt_info_offset);
+ ppi->size = NDIS_VLAN_PPI_SIZE;
+ ppi->type = IEEE_8021Q_INFO;
+ ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
+
+ vlan = (struct ndis_pkt_8021q_info *)((ulong)ppi +
+ ppi->ppi_offset);
+ vlan->vlanid = pkt->vlan_tci & VLAN_VID_MASK;
+ vlan->pri = (pkt->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ }
pkt->is_data_pkt = true;
- pkt->page_buf[0].pfn = virt_to_phys(rndisMessage) >> PAGE_SHIFT;
+ pkt->page_buf[0].pfn = virt_to_phys(rndis_msg) >> PAGE_SHIFT;
pkt->page_buf[0].offset =
- (unsigned long)rndisMessage & (PAGE_SIZE-1);
- pkt->page_buf[0].len = rndisMessageSize;
+ (unsigned long)rndis_msg & (PAGE_SIZE-1);
+ pkt->page_buf[0].len = rndis_msg_size;
/* Add one page_buf if the rndis msg goes beyond page boundary */
- if (pkt->page_buf[0].offset + rndisMessageSize > PAGE_SIZE) {
+ if (pkt->page_buf[0].offset + rndis_msg_size > PAGE_SIZE) {
int i;
for (i = pkt->page_buf_cnt; i > 1; i--)
pkt->page_buf[i] = pkt->page_buf[i-1];
pkt->page_buf_cnt++;
pkt->page_buf[0].len = PAGE_SIZE - pkt->page_buf[0].offset;
pkt->page_buf[1].pfn = virt_to_phys((void *)((ulong)
- rndisMessage + pkt->page_buf[0].len)) >> PAGE_SHIFT;
+ rndis_msg + pkt->page_buf[0].len)) >> PAGE_SHIFT;
pkt->page_buf[1].offset = 0;
- pkt->page_buf[1].len = rndisMessageSize - pkt->page_buf[0].len;
+ pkt->page_buf[1].len = rndis_msg_size - pkt->page_buf[0].len;
}
/* Save the packet send completion and context */
- filterPacket->completion = pkt->completion.send.send_completion;
- filterPacket->completion_ctx =
+ filter_pkt->completion = pkt->completion.send.send_completion;
+ filter_pkt->completion_ctx =
pkt->completion.send.send_completion_ctx;
/* Use ours */
pkt->completion.send.send_completion = rndis_filter_send_completion;
- pkt->completion.send.send_completion_ctx = filterPacket;
+ pkt->completion.send.send_completion_ctx = filter_pkt;
ret = netvsc_send(dev, pkt);
if (ret != 0) {
@@ -826,9 +869,9 @@ int rndis_filter_send(struct hv_device *dev,
* above
*/
pkt->completion.send.send_completion =
- filterPacket->completion;
+ filter_pkt->completion;
pkt->completion.send.send_completion_ctx =
- filterPacket->completion_ctx;
+ filter_pkt->completion_ctx;
}
return ret;
@@ -836,10 +879,10 @@ int rndis_filter_send(struct hv_device *dev,
static void rndis_filter_send_completion(void *ctx)
{
- struct rndis_filter_packet *filterPacket = ctx;
+ struct rndis_filter_packet *filter_pkt = ctx;
/* Pass it back to the original handler */
- filterPacket->completion(filterPacket->completion_ctx);
+ filter_pkt->completion(filter_pkt->completion_ctx);
}
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index e05b645bbc32..344dceb1aaf9 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -184,7 +184,7 @@ static void ifb_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
}
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 963067d3bda2..dcc80d652b78 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1368,7 +1368,7 @@ static int ali_ircc_net_open(struct net_device *dev)
IRDA_WARNING("%s, unable to allocate dma=%d\n",
ALI_IRCC_DRIVER_NAME,
self->io.dma);
- free_irq(self->io.irq, self);
+ free_irq(self->io.irq, dev);
return -EAGAIN;
}
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 64f403da101c..617a446d126c 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1608,7 +1608,6 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
self->ringbuf = kmalloc(OBOE_RING_LEN << 1, GFP_KERNEL);
if (!self->ringbuf)
{
- printk (KERN_ERR DRIVER_NAME ": can't allocate DMA buffers\n");
err = -ENOMEM;
goto freeregion;
}
@@ -1647,7 +1646,6 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
if (!ok)
{
- printk (KERN_ERR DRIVER_NAME ": can't allocate rx/tx buffers\n");
err = -ENOMEM;
goto freebufs;
}
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 81d5275a15e2..ff16daf33ae1 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -128,20 +128,20 @@ struct pxa_irda {
static inline void pxa_irda_disable_clk(struct pxa_irda *si)
{
if (si->cur_clk)
- clk_disable(si->cur_clk);
+ clk_disable_unprepare(si->cur_clk);
si->cur_clk = NULL;
}
static inline void pxa_irda_enable_firclk(struct pxa_irda *si)
{
si->cur_clk = si->fir_clk;
- clk_enable(si->fir_clk);
+ clk_prepare_enable(si->fir_clk);
}
static inline void pxa_irda_enable_sirclk(struct pxa_irda *si)
{
si->cur_clk = si->sir_clk;
- clk_enable(si->sir_clk);
+ clk_prepare_enable(si->sir_clk);
}
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 2d456dd164fb..1a89fd459dd5 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -1495,14 +1495,14 @@ static int via_ircc_net_open(struct net_device *dev)
if (request_dma(self->io.dma, dev->name)) {
IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
self->io.dma);
- free_irq(self->io.irq, self);
+ free_irq(self->io.irq, dev);
return -EAGAIN;
}
if (self->io.dma2 != self->io.dma) {
if (request_dma(self->io.dma2, dev->name)) {
IRDA_WARNING("%s, unable to allocate dma2=%d\n",
driver_name, self->io.dma2);
- free_irq(self->io.irq, self);
+ free_irq(self->io.irq, dev);
free_dma(self->io.dma);
return -EAGAIN;
}
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 7d43506c7032..f5bb92f15880 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -1172,7 +1172,7 @@ static int w83977af_net_open(struct net_device *dev)
* and clean up on failure.
*/
if (request_dma(self->io.dma, dev->name)) {
- free_irq(self->io.irq, self);
+ free_irq(self->io.irq, dev);
return -EAGAIN;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 9ea99217f116..f975afdc315c 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -372,6 +372,7 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
if (!(dev->flags & IFF_UP)) {
/* Just copy in the new address */
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
} else {
/* Rehash and update the device filters */
@@ -687,7 +688,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
if (!tb[IFLA_ADDRESS])
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
if (!macvlan_port_exists(lowerdev)) {
err = macvlan_port_create(lowerdev);
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index 16fbb11d92ac..8403316eb02b 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -190,6 +190,9 @@ void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
int reg;
u32 speed;
+ BUILD_BUG_ON(MDIO_SUPPORTS_C22 != ETH_MDIO_SUPPORTS_C22);
+ BUILD_BUG_ON(MDIO_SUPPORTS_C45 != ETH_MDIO_SUPPORTS_C45);
+
ecmd->transceiver = XCVR_INTERNAL;
ecmd->phy_address = mdio->prtad;
ecmd->mdio_support =
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index c70c2332d15e..4a99c3919037 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -31,7 +31,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
-#include <linux/mdio.h>
+#include <linux/mii.h>
static u32 mii_get_an(struct mii_if_info *mii, u16 addr)
{
@@ -74,7 +74,7 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
/* this isn't fully supported at higher layers */
ecmd->phy_address = mii->phy_id;
- ecmd->mdio_support = MDIO_SUPPORTS_C22;
+ ecmd->mdio_support = ETH_MDIO_SUPPORTS_C22;
ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index e8882023576b..f9347ea3d381 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -169,10 +169,8 @@ static struct netconsole_target *alloc_param_target(char *target_config)
* Note that these targets get their config_item fields zeroed-out.
*/
nt = kzalloc(sizeof(*nt), GFP_KERNEL);
- if (!nt) {
- printk(KERN_ERR "netconsole: failed to allocate memory\n");
+ if (!nt)
goto fail;
- }
nt->np.name = "netconsole";
strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ);
@@ -551,10 +549,8 @@ static struct config_item *make_netconsole_target(struct config_group *group,
* Target is disabled at creation (enabled == 0).
*/
nt = kzalloc(sizeof(*nt), GFP_KERNEL);
- if (!nt) {
- printk(KERN_ERR "netconsole: failed to allocate memory\n");
+ if (!nt)
return ERR_PTR(-ENOMEM);
- }
nt->np.name = "netconsole";
strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index fbdcdf83cbfd..0e01f4e5cd64 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -15,6 +15,11 @@ if PHYLIB
comment "MII PHY device drivers"
+config AMD_PHY
+ tristate "Drivers for the AMD PHYs"
+ ---help---
+ Currently supports the am79c874
+
config MARVELL_PHY
tristate "Drivers for Marvell PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index e15c83fecbe0..b7438b1b94b9 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -24,3 +24,4 @@ obj-$(CONFIG_STE10XP) += ste10Xp.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
+obj-$(CONFIG_AMD_PHY) += amd.o
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
new file mode 100644
index 000000000000..cfabd5fe5372
--- /dev/null
+++ b/drivers/net/phy/amd.c
@@ -0,0 +1,102 @@
+/*
+ * Driver for AMD am79c PHYs
+ *
+ * Author: Heiko Schocher <hs@denx.de>
+ *
+ * Copyright (c) 2011 DENX Software Engineering GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#define PHY_ID_AM79C874 0x0022561b
+
+#define MII_AM79C_IR 17 /* Interrupt Status/Control Register */
+#define MII_AM79C_IR_EN_LINK 0x0400 /* IR enable Linkstate */
+#define MII_AM79C_IR_EN_ANEG 0x0100 /* IR enable Aneg Complete */
+#define MII_AM79C_IR_IMASK_INIT (MII_AM79C_IR_EN_LINK | MII_AM79C_IR_EN_ANEG)
+
+MODULE_DESCRIPTION("AMD PHY driver");
+MODULE_AUTHOR("Heiko Schocher <hs@denx.de>");
+MODULE_LICENSE("GPL");
+
+static int am79c_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, MII_BMSR);
+ if (err < 0)
+ return err;
+
+ err = phy_read(phydev, MII_AM79C_IR);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int am79c_config_init(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static int am79c_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, MII_AM79C_IR, MII_AM79C_IR_IMASK_INIT);
+ else
+ err = phy_write(phydev, MII_AM79C_IR, 0);
+
+ return err;
+}
+
+static struct phy_driver am79c_driver = {
+ .phy_id = PHY_ID_AM79C874,
+ .name = "AM79C874",
+ .phy_id_mask = 0xfffffff0,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = am79c_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = am79c_ack_interrupt,
+ .config_intr = am79c_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init am79c_init(void)
+{
+ int ret;
+
+ ret = phy_driver_register(&am79c_driver);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void __exit am79c_exit(void)
+{
+ phy_driver_unregister(&am79c_driver);
+}
+
+module_init(am79c_init);
+module_exit(am79c_exit);
+
+static struct mdio_device_id __maybe_unused amd_tbl[] = {
+ { PHY_ID_AM79C874, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, amd_tbl);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index e8be47d6d7d0..60338ff63092 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -355,8 +355,7 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
}
}
- if (clk125en == false ||
- (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
+ if (!clk125en || (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
val &= ~BCM54XX_SHD_SCR3_DLLAPD_DIS;
else
val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
@@ -373,8 +372,7 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
orig = val;
- if (clk125en == false ||
- (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
+ if (!clk125en || (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
val |= BCM54XX_SHD_APD_EN;
else
val &= ~BCM54XX_SHD_APD_EN;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index ba3c59147aa7..dd7ae19579d1 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1243,7 +1243,7 @@ static void __exit dp83640_exit(void)
}
MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver");
-MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.at>");
MODULE_LICENSE("GPL");
module_init(dp83640_init);
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index a9e9ca8a86ed..1a5a316cc968 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1260,10 +1260,8 @@ static void plip_attach (struct parport *port)
sprintf(name, "plip%d", unit);
dev = alloc_etherdev(sizeof(struct net_local));
- if (!dev) {
- printk(KERN_ERR "plip: memory squeeze\n");
+ if (!dev)
return;
- }
strcpy(dev->name, name);
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index c6ba64380829..af95a98fd86f 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -26,7 +26,7 @@
#include <linux/poll.h>
#include <linux/crc-ccitt.h>
#include <linux/ppp_defs.h>
-#include <linux/if_ppp.h>
+#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index 1dbdf82a6dfd..602c625d95d5 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -1,34 +1,12 @@
/*
- * ==FILEVERSION 980319==
- *
* ppp_deflate.c - interface the zlib procedures for Deflate compression
* and decompression (as used by gzip) to the PPP code.
- * This version is for use with Linux kernel 1.3.X.
- *
- * Copyright (c) 1994 The Australian National University.
- * All rights reserved.
- *
- * Permission to use, copy, modify, and distribute this software and its
- * documentation is hereby granted, provided that the above copyright
- * notice appears in all copies. This software is provided without any
- * warranty, express or implied. The Australian National University
- * makes no representations about the suitability of this software for
- * any purpose.
- *
- * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
- * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
- * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
- * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY
- * OF SUCH DAMAGE.
*
- * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
- * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
- * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
- * OR MODIFICATIONS.
+ * Copyright 1994-1998 Paul Mackerras.
*
- * From: deflate.c,v 1.1 1996/01/18 03:17:48 paulus Exp
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
*/
#include <linux/module.h>
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 486b4048850d..159da2905fe9 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -32,7 +32,7 @@
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/filter.h>
-#include <linux/if_ppp.h>
+#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
@@ -1031,7 +1031,7 @@ static void ppp_setup(struct net_device *dev)
{
dev->netdev_ops = &ppp_netdev_ops;
dev->hard_header_len = PPP_HDRLEN;
- dev->mtu = PPP_MTU;
+ dev->mtu = PPP_MRU;
dev->addr_len = 0;
dev->tx_queue_len = 3;
dev->type = ARPHRD_PPP;
@@ -2136,7 +2136,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
skb->len += p->len;
skb->data_len += p->len;
- skb->truesize += p->len;
+ skb->truesize += p->truesize;
if (p == tail)
break;
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 736a39ee05bb..55e466c511d5 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -39,7 +39,7 @@
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
-#include <linux/if_ppp.h>
+#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index bc9a4bb31980..2fa1a9b6f498 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -72,7 +72,7 @@
#include <linux/if_pppox.h>
#include <linux/ppp_channel.h>
#include <linux/ppp_defs.h>
-#include <linux/if_ppp.h>
+#include <linux/ppp-ioctl.h>
#include <linux/notifier.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
index 8c0d170dabcd..2940e9fe351b 100644
--- a/drivers/net/ppp/pppox.c
+++ b/drivers/net/ppp/pppox.c
@@ -28,7 +28,7 @@
#include <linux/init.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
-#include <linux/if_ppp.h>
+#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/kmod.h>
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index df884dde2a51..885dbdd9c39e 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -23,7 +23,7 @@
#include <linux/ppp_channel.h>
#include <linux/ppp_defs.h>
#include <linux/if_pppox.h>
-#include <linux/if_ppp.h>
+#include <linux/ppp-ioctl.h>
#include <linux/notifier.h>
#include <linux/file.h>
#include <linux/in.h>
@@ -481,7 +481,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.mtu = dst_mtu(&rt->dst);
if (!po->chan.mtu)
- po->chan.mtu = PPP_MTU;
+ po->chan.mtu = PPP_MRU;
ip_rt_put(rt);
po->chan.mtu -= PPTP_HEADER_OVERHEAD;
@@ -670,10 +670,8 @@ static int __init pptp_init_module(void)
pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
callid_sock = vzalloc((MAX_CALLID + 1) * sizeof(void *));
- if (!callid_sock) {
- pr_err("PPTP: cann't allocate memory\n");
+ if (!callid_sock)
return -ENOMEM;
- }
err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
if (err) {
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 7145714a5ec9..a57f05726b57 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -497,8 +497,6 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
/* Allocate our net_device structure */
ndev = alloc_etherdev(sizeof(struct rionet_private));
if (ndev == NULL) {
- printk(KERN_INFO "%s: could not allocate ethernet device.\n",
- DRV_NAME);
rc = -ENOMEM;
goto out;
}
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index ba08341fb92c..69345dfae0fd 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -1296,10 +1296,8 @@ static int __init slip_init(void)
slip_devs = kzalloc(sizeof(struct net_device *)*slip_maxdev,
GFP_KERNEL);
- if (!slip_devs) {
- printk(KERN_ERR "SLIP: Can't allocate slip devices array.\n");
+ if (!slip_devs)
return -ENOMEM;
- }
/* Fill in our line protocol discipline, and register it */
status = tty_register_ldisc(N_SLIP, &sl_ldisc);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6b678f38e5ce..8f81805c6825 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -868,6 +868,7 @@ static int team_set_mac_address(struct net_device *dev, void *p)
struct team_port *port;
struct sockaddr *addr = p;
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list)
@@ -1087,7 +1088,7 @@ static int team_newlink(struct net *src_net, struct net_device *dev,
int err;
if (tb[IFLA_ADDRESS] == NULL)
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
err = register_netdevice(dev);
if (err)
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index ef9fdf3652f6..d7c292aa76b1 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -674,15 +674,11 @@ static int xl_open(struct net_device *dev)
/* These MUST be on 8 byte boundaries */
xl_priv->xl_tx_ring = kzalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL);
if (xl_priv->xl_tx_ring == NULL) {
- printk(KERN_WARNING "%s: Not enough memory to allocate tx buffers.\n",
- dev->name);
free_irq(dev->irq,dev);
return -ENOMEM;
}
xl_priv->xl_rx_ring = kzalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL);
if (xl_priv->xl_rx_ring == NULL) {
- printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers.\n",
- dev->name);
free_irq(dev->irq,dev);
kfree(xl_priv->xl_tx_ring);
return -ENOMEM;
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 6153cfd696b6..1cdc034f6aec 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -171,7 +171,6 @@ static int __devinit madgemc_probe(struct device *device)
card = kmalloc(sizeof(struct card_info), GFP_KERNEL);
if (card==NULL) {
- printk("madgemc: unable to allocate card struct\n");
ret = -ENOMEM;
goto getout1;
}
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 65e9cf3a71fe..102f896bbc58 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1525,10 +1525,8 @@ static void tms380tr_chk_outstanding_cmds(struct net_device *dev)
/* Check if adapter is opened, avoiding COMMAND_REJECT
* interrupt by the adapter!
*/
- if(tp->AdapterOpenFlag == 0)
- {
- if(tp->CMDqueue & OC_OPEN)
- {
+ if (tp->AdapterOpenFlag == 0) {
+ if (tp->CMDqueue & OC_OPEN) {
/* Execute OPEN command */
tp->CMDqueue ^= OC_OPEN;
@@ -1536,21 +1534,17 @@ static void tms380tr_chk_outstanding_cmds(struct net_device *dev)
tp->scb.Parm[0] = LOWORD(Addr);
tp->scb.Parm[1] = HIWORD(Addr);
tp->scb.CMD = OPEN;
- }
- else
+ } else
/* No OPEN command queued, but adapter closed. Note:
* We'll try to re-open the adapter in DriverPoll()
*/
return; /* No adapter command issued */
- }
- else
- {
+ } else {
/* Adapter is open; evaluate command queue: try to execute
* outstanding commands (depending on priority!) CLOSE
* command queued
*/
- if(tp->CMDqueue & OC_CLOSE)
- {
+ if (tp->CMDqueue & OC_CLOSE) {
tp->CMDqueue ^= OC_CLOSE;
tp->AdapterOpenFlag = 0;
tp->scb.Parm[0] = 0; /* Parm[0], Parm[1] are ignored */
@@ -1560,109 +1554,70 @@ static void tms380tr_chk_outstanding_cmds(struct net_device *dev)
tp->CMDqueue |= OC_OPEN; /* re-open adapter */
else
tp->CMDqueue = 0; /* no more commands */
- }
- else
- {
- if(tp->CMDqueue & OC_RECEIVE)
- {
- tp->CMDqueue ^= OC_RECEIVE;
- Addr = htonl(((char *)tp->RplHead - (char *)tp) + tp->dmabuffer);
- tp->scb.Parm[0] = LOWORD(Addr);
- tp->scb.Parm[1] = HIWORD(Addr);
- tp->scb.CMD = RECEIVE;
- }
- else
- {
- if(tp->CMDqueue & OC_TRANSMIT_HALT)
- {
- /* NOTE: TRANSMIT.HALT must be checked
- * before TRANSMIT.
- */
- tp->CMDqueue ^= OC_TRANSMIT_HALT;
- tp->scb.CMD = TRANSMIT_HALT;
-
- /* Parm[0] and Parm[1] are ignored
- * but should be set to zero!
- */
- tp->scb.Parm[0] = 0;
- tp->scb.Parm[1] = 0;
- }
- else
- {
- if(tp->CMDqueue & OC_TRANSMIT)
- {
- /* NOTE: TRANSMIT must be
- * checked after TRANSMIT.HALT
- */
- if(tp->TransmitCommandActive)
- {
- if(!tp->TransmitHaltScheduled)
- {
- tp->TransmitHaltScheduled = 1;
- tms380tr_exec_cmd(dev, OC_TRANSMIT_HALT) ;
- }
- tp->TransmitCommandActive = 0;
- return;
- }
-
- tp->CMDqueue ^= OC_TRANSMIT;
- tms380tr_cancel_tx_queue(tp);
- Addr = htonl(((char *)tp->TplBusy - (char *)tp) + tp->dmabuffer);
- tp->scb.Parm[0] = LOWORD(Addr);
- tp->scb.Parm[1] = HIWORD(Addr);
- tp->scb.CMD = TRANSMIT;
- tp->TransmitCommandActive = 1;
- }
- else
- {
- if(tp->CMDqueue & OC_MODIFY_OPEN_PARMS)
- {
- tp->CMDqueue ^= OC_MODIFY_OPEN_PARMS;
- tp->scb.Parm[0] = tp->ocpl.OPENOptions; /* new OPEN options*/
- tp->scb.Parm[0] |= ENABLE_FULL_DUPLEX_SELECTION;
- tp->scb.Parm[1] = 0; /* is ignored but should be zero */
- tp->scb.CMD = MODIFY_OPEN_PARMS;
- }
- else
- {
- if(tp->CMDqueue & OC_SET_FUNCT_ADDR)
- {
- tp->CMDqueue ^= OC_SET_FUNCT_ADDR;
- tp->scb.Parm[0] = LOWORD(tp->ocpl.FunctAddr);
- tp->scb.Parm[1] = HIWORD(tp->ocpl.FunctAddr);
- tp->scb.CMD = SET_FUNCT_ADDR;
- }
- else
- {
- if(tp->CMDqueue & OC_SET_GROUP_ADDR)
- {
- tp->CMDqueue ^= OC_SET_GROUP_ADDR;
- tp->scb.Parm[0] = LOWORD(tp->ocpl.GroupAddr);
- tp->scb.Parm[1] = HIWORD(tp->ocpl.GroupAddr);
- tp->scb.CMD = SET_GROUP_ADDR;
- }
- else
- {
- if(tp->CMDqueue & OC_READ_ERROR_LOG)
- {
- tp->CMDqueue ^= OC_READ_ERROR_LOG;
- Addr = htonl(((char *)&tp->errorlogtable - (char *)tp) + tp->dmabuffer);
- tp->scb.Parm[0] = LOWORD(Addr);
- tp->scb.Parm[1] = HIWORD(Addr);
- tp->scb.CMD = READ_ERROR_LOG;
- }
- else
- {
- printk(KERN_WARNING "CheckForOutstandingCommand: unknown Command\n");
- tp->CMDqueue = 0;
- return;
- }
- }
- }
- }
- }
+ } else if (tp->CMDqueue & OC_RECEIVE) {
+ tp->CMDqueue ^= OC_RECEIVE;
+ Addr = htonl(((char *)tp->RplHead - (char *)tp) + tp->dmabuffer);
+ tp->scb.Parm[0] = LOWORD(Addr);
+ tp->scb.Parm[1] = HIWORD(Addr);
+ tp->scb.CMD = RECEIVE;
+ } else if (tp->CMDqueue & OC_TRANSMIT_HALT) {
+ /* NOTE: TRANSMIT.HALT must be checked
+ * before TRANSMIT.
+ */
+ tp->CMDqueue ^= OC_TRANSMIT_HALT;
+ tp->scb.CMD = TRANSMIT_HALT;
+
+ /* Parm[0] and Parm[1] are ignored
+ * but should be set to zero!
+ */
+ tp->scb.Parm[0] = 0;
+ tp->scb.Parm[1] = 0;
+ } else if (tp->CMDqueue & OC_TRANSMIT) {
+ /* NOTE: TRANSMIT must be
+ * checked after TRANSMIT.HALT
+ */
+ if (tp->TransmitCommandActive) {
+ if (!tp->TransmitHaltScheduled) {
+ tp->TransmitHaltScheduled = 1;
+ tms380tr_exec_cmd(dev, OC_TRANSMIT_HALT);
}
+ tp->TransmitCommandActive = 0;
+ return;
}
+
+ tp->CMDqueue ^= OC_TRANSMIT;
+ tms380tr_cancel_tx_queue(tp);
+ Addr = htonl(((char *)tp->TplBusy - (char *)tp) + tp->dmabuffer);
+ tp->scb.Parm[0] = LOWORD(Addr);
+ tp->scb.Parm[1] = HIWORD(Addr);
+ tp->scb.CMD = TRANSMIT;
+ tp->TransmitCommandActive = 1;
+ } else if (tp->CMDqueue & OC_MODIFY_OPEN_PARMS) {
+ tp->CMDqueue ^= OC_MODIFY_OPEN_PARMS;
+ tp->scb.Parm[0] = tp->ocpl.OPENOptions; /* new OPEN options*/
+ tp->scb.Parm[0] |= ENABLE_FULL_DUPLEX_SELECTION;
+ tp->scb.Parm[1] = 0; /* is ignored but should be zero */
+ tp->scb.CMD = MODIFY_OPEN_PARMS;
+ } else if (tp->CMDqueue & OC_SET_FUNCT_ADDR) {
+ tp->CMDqueue ^= OC_SET_FUNCT_ADDR;
+ tp->scb.Parm[0] = LOWORD(tp->ocpl.FunctAddr);
+ tp->scb.Parm[1] = HIWORD(tp->ocpl.FunctAddr);
+ tp->scb.CMD = SET_FUNCT_ADDR;
+ } else if (tp->CMDqueue & OC_SET_GROUP_ADDR) {
+ tp->CMDqueue ^= OC_SET_GROUP_ADDR;
+ tp->scb.Parm[0] = LOWORD(tp->ocpl.GroupAddr);
+ tp->scb.Parm[1] = HIWORD(tp->ocpl.GroupAddr);
+ tp->scb.CMD = SET_GROUP_ADDR;
+ } else if (tp->CMDqueue & OC_READ_ERROR_LOG) {
+ tp->CMDqueue ^= OC_READ_ERROR_LOG;
+ Addr = htonl(((char *)&tp->errorlogtable - (char *)tp) + tp->dmabuffer);
+ tp->scb.Parm[0] = LOWORD(Addr);
+ tp->scb.Parm[1] = HIWORD(Addr);
+ tp->scb.CMD = READ_ERROR_LOG;
+ } else {
+ printk(KERN_WARNING "CheckForOutstandingCommand: unknown Command\n");
+ tp->CMDqueue = 0;
+ return;
}
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2d7601dd6660..74d7f76d14a3 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -531,7 +531,7 @@ static void tun_net_init(struct net_device *dev)
ether_setup(dev);
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
break;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 4bad899fb38f..833e32f8d63b 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -401,6 +401,7 @@ config USB_NET_KALMIA
config USB_NET_QMI_WWAN
tristate "QMI WWAN driver for Qualcomm MSM based 3G and LTE modems"
depends on USB_USBNET
+ select USB_WDM
help
Support WWAN LTE/3G devices based on Qualcomm Mobile Data Modem
(MDM) chipsets. Examples of such devices are
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index d6da5eed5453..5ee032cafade 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -305,88 +305,40 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
- u8 *head;
- u32 header;
- char *packet;
- struct sk_buff *ax_skb;
- u16 size;
+ int offset = 0;
- head = (u8 *) skb->data;
- memcpy(&header, head, sizeof(header));
- le32_to_cpus(&header);
- packet = head + sizeof(header);
+ while (offset + sizeof(u32) < skb->len) {
+ struct sk_buff *ax_skb;
+ u16 size;
+ u32 header = get_unaligned_le32(skb->data + offset);
- skb_pull(skb, 4);
-
- while (skb->len > 0) {
- if ((header & 0x07ff) != ((~header >> 16) & 0x07ff))
- netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
+ offset += sizeof(u32);
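+ /* 32-bit RX header: bits 10:0 carry the packet length and bits
+ * 26:16 its one's complement, checked against the length below. */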
/* get the packet length */
- size = (u16) (header & 0x000007ff);
-
- if ((skb->len) - ((size + 1) & 0xfffe) == 0) {
- u8 alignment = (unsigned long)skb->data & 0x3;
- if (alignment != 0x2) {
- /*
- * not 16bit aligned so use the room provided by
- * the 32 bit header to align the data
- *
- * note we want 16bit alignment as MAC header is
- * 14bytes thus ip header will be aligned on
- * 32bit boundary so accessing ipheader elements
- * using a cast to struct ip header wont cause
- * an unaligned accesses.
- */
- u8 realignment = (alignment + 2) & 0x3;
- memmove(skb->data - realignment,
- skb->data,
- size);
- skb->data -= realignment;
- skb_set_tail_pointer(skb, size);
- }
- return 2;
+ size = (u16) (header & 0x7ff);
+ if (size != ((~header >> 16) & 0x07ff)) {
+ netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
+ return 0;
}
- if (size > dev->net->mtu + ETH_HLEN) {
+ if ((size > dev->net->mtu + ETH_HLEN) ||
+ (size + offset > skb->len)) {
netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
size);
return 0;
}
- ax_skb = skb_clone(skb, GFP_ATOMIC);
- if (ax_skb) {
- u8 alignment = (unsigned long)packet & 0x3;
- ax_skb->len = size;
-
- if (alignment != 0x2) {
- /*
- * not 16bit aligned use the room provided by
- * the 32 bit header to align the data
- */
- u8 realignment = (alignment + 2) & 0x3;
- memmove(packet - realignment, packet, size);
- packet -= realignment;
- }
- ax_skb->data = packet;
- skb_set_tail_pointer(ax_skb, size);
- usbnet_skb_return(dev, ax_skb);
- } else {
+ ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
+ if (!ax_skb)
return 0;
- }
-
- skb_pull(skb, (size + 1) & 0xfffe);
- if (skb->len < sizeof(header))
- break;
+ skb_put(ax_skb, size);
+ memcpy(ax_skb->data, skb->data + offset, size);
+ usbnet_skb_return(dev, ax_skb);
- head = (u8 *) skb->data;
- memcpy(&header, head, sizeof(header));
- le32_to_cpus(&header);
- packet = head + sizeof(header);
- skb_pull(skb, 4);
+ offset += (size + 1) & 0xfffe;
}
- if (skb->len < 0) {
+ if (skb->len != offset) {
netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
skb->len);
return 0;
@@ -1541,7 +1493,7 @@ static const struct driver_info ax88772_info = {
.status = asix_status,
.link_reset = ax88772_link_reset,
.reset = ax88772_reset,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup,
.tx_fixup = asix_tx_fixup,
};
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 3a539a9cac54..7adc9f6b0ea1 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1,7 +1,7 @@
/*
* cdc_ncm.c
*
- * Copyright (C) ST-Ericsson 2010-2011
+ * Copyright (C) ST-Ericsson 2010-2012
* Contact: Alexey Orishko <alexey.orishko@stericsson.com>
* Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
*
@@ -47,20 +47,19 @@
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/usb.h>
-#include <linux/timer.h>
-#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
#include <linux/atomic.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc.h>
-#define DRIVER_VERSION "04-Aug-2011"
+#define DRIVER_VERSION "14-Mar-2012"
/* CDC NCM subclass 3.2.1 */
#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
/* Maximum NTB length */
-#define CDC_NCM_NTB_MAX_SIZE_TX 16384 /* bytes */
-#define CDC_NCM_NTB_MAX_SIZE_RX 16384 /* bytes */
+#define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */
+#define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */
/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
#define CDC_NCM_MIN_DATAGRAM_SIZE 1514 /* bytes */
@@ -68,19 +67,18 @@
#define CDC_NCM_MIN_TX_PKT 512 /* bytes */
/* Default value for MaxDatagramSize */
-#define CDC_NCM_MAX_DATAGRAM_SIZE 2048 /* bytes */
+#define CDC_NCM_MAX_DATAGRAM_SIZE 8192 /* bytes */
/*
* Maximum amount of datagrams in NCM Datagram Pointer Table, not counting
- * the last NULL entry. Any additional datagrams in NTB would be discarded.
+ * the last NULL entry.
*/
-#define CDC_NCM_DPT_DATAGRAMS_MAX 32
-
-/* Maximum amount of IN datagrams in NTB */
-#define CDC_NCM_DPT_DATAGRAMS_IN_MAX 0 /* unlimited */
+#define CDC_NCM_DPT_DATAGRAMS_MAX 40
/* Restart the timer, if amount of datagrams is less than given value */
#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
+#define CDC_NCM_TIMER_PENDING_CNT 2
+#define CDC_NCM_TIMER_INTERVAL (400UL * NSEC_PER_USEC)
/* The following macro defines the minimum header space */
#define CDC_NCM_MIN_HDR_SIZE \
@@ -94,10 +92,10 @@ struct cdc_ncm_data {
};
struct cdc_ncm_ctx {
- struct cdc_ncm_data rx_ncm;
struct cdc_ncm_data tx_ncm;
struct usb_cdc_ncm_ntb_parameters ncm_parm;
- struct timer_list tx_timer;
+ struct hrtimer tx_timer;
+ struct tasklet_struct bh;
const struct usb_cdc_ncm_desc *func_desc;
const struct usb_cdc_header_desc *header_desc;
@@ -117,6 +115,7 @@ struct cdc_ncm_ctx {
struct sk_buff *tx_rem_skb;
spinlock_t mtx;
+ atomic_t stop;
u32 tx_timer_pending;
u32 tx_curr_offset;
@@ -132,10 +131,13 @@ struct cdc_ncm_ctx {
u16 tx_modulus;
u16 tx_ndp_modulus;
u16 tx_seq;
+ u16 rx_seq;
u16 connected;
};
-static void cdc_ncm_tx_timeout(unsigned long arg);
+static void cdc_ncm_txpath_bh(unsigned long param);
+static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
+static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
static const struct driver_info cdc_ncm_info;
static struct usb_driver cdc_ncm_driver;
static const struct ethtool_ops cdc_ncm_ethtool_ops;
@@ -361,27 +363,25 @@ size_err:
if (err < 0) {
pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
CDC_NCM_MIN_DATAGRAM_SIZE);
- kfree(max_datagram_size);
} else {
ctx->max_datagram_size =
le16_to_cpu(*max_datagram_size);
/* Check Eth descriptor value */
- if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
- if (ctx->max_datagram_size > eth_max_sz)
+ if (ctx->max_datagram_size > eth_max_sz)
ctx->max_datagram_size = eth_max_sz;
- } else {
- if (ctx->max_datagram_size >
- CDC_NCM_MAX_DATAGRAM_SIZE)
- ctx->max_datagram_size =
+
+ if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
+ ctx->max_datagram_size =
CDC_NCM_MAX_DATAGRAM_SIZE;
- }
if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
ctx->max_datagram_size =
CDC_NCM_MIN_DATAGRAM_SIZE;
/* if value changed, update device */
- err = usb_control_msg(ctx->udev,
+ if (ctx->max_datagram_size !=
+ le16_to_cpu(*max_datagram_size)) {
+ err = usb_control_msg(ctx->udev,
usb_sndctrlpipe(ctx->udev, 0),
USB_CDC_SET_MAX_DATAGRAM_SIZE,
USB_TYPE_CLASS | USB_DIR_OUT
@@ -389,14 +389,14 @@ size_err:
0,
iface_no, max_datagram_size,
2, 1000);
- kfree(max_datagram_size);
-max_dgram_err:
- if (err < 0)
- pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
+ if (err < 0)
+ pr_debug("SET_MAX_DGRAM_SIZE failed\n");
+ }
}
-
+ kfree(max_datagram_size);
}
+max_dgram_err:
if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
ctx->netdev->mtu = ctx->max_datagram_size - ETH_HLEN;
@@ -441,8 +441,6 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
if (ctx == NULL)
return;
- del_timer_sync(&ctx->tx_timer);
-
if (ctx->tx_rem_skb != NULL) {
dev_kfree_skb_any(ctx->tx_rem_skb);
ctx->tx_rem_skb = NULL;
@@ -469,7 +467,11 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
if (ctx == NULL)
return -ENODEV;
- init_timer(&ctx->tx_timer);
+ hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
+ ctx->bh.data = (unsigned long)ctx;
+ ctx->bh.func = cdc_ncm_txpath_bh;
+ atomic_set(&ctx->stop, 0);
spin_lock_init(&ctx->mtx);
ctx->netdev = dev->net;
@@ -579,11 +581,7 @@ advance:
if (temp)
goto error2;
- dev_info(&dev->udev->dev, "MAC-Address: "
- "0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
- dev->net->dev_addr[0], dev->net->dev_addr[1],
- dev->net->dev_addr[2], dev->net->dev_addr[3],
- dev->net->dev_addr[4], dev->net->dev_addr[5]);
+ dev_info(&dev->udev->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
dev->in = usb_rcvbulkpipe(dev->udev,
ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
@@ -621,6 +619,13 @@ static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
if (ctx == NULL)
return; /* no setup */
+ atomic_set(&ctx->stop, 1);
+
+ if (hrtimer_active(&ctx->tx_timer))
+ hrtimer_cancel(&ctx->tx_timer);
+
+ tasklet_kill(&ctx->bh);
+
/* disconnect master --> disconnect slave */
if (intf == ctx->control && ctx->data) {
usb_set_intfdata(ctx->data, NULL);
@@ -791,7 +796,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
ctx->tx_curr_last_offset = last_offset;
/* set the pending count */
if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT)
- ctx->tx_timer_pending = 2;
+ ctx->tx_timer_pending = CDC_NCM_TIMER_PENDING_CNT;
goto exit_no_skb;
} else {
@@ -871,44 +876,49 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
/* return skb */
ctx->tx_curr_skb = NULL;
+ ctx->netdev->stats.tx_packets += ctx->tx_curr_frame_num;
return skb_out;
exit_no_skb:
+ /* Start timer, if there is a remaining skb */
+ if (ctx->tx_curr_skb != NULL)
+ cdc_ncm_tx_timeout_start(ctx);
return NULL;
}
static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
{
/* start timer, if not already started */
- if (timer_pending(&ctx->tx_timer) == 0) {
- ctx->tx_timer.function = &cdc_ncm_tx_timeout;
- ctx->tx_timer.data = (unsigned long)ctx;
- ctx->tx_timer.expires = jiffies + ((HZ + 999) / 1000);
- add_timer(&ctx->tx_timer);
- }
+ if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop)))
+ hrtimer_start(&ctx->tx_timer,
+ ktime_set(0, CDC_NCM_TIMER_INTERVAL),
+ HRTIMER_MODE_REL);
}
-static void cdc_ncm_tx_timeout(unsigned long arg)
+static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *timer)
{
- struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)arg;
- u8 restart;
+ struct cdc_ncm_ctx *ctx =
+ container_of(timer, struct cdc_ncm_ctx, tx_timer);
- spin_lock(&ctx->mtx);
- if (ctx->tx_timer_pending != 0) {
- ctx->tx_timer_pending--;
- restart = 1;
- } else {
- restart = 0;
- }
+ if (!atomic_read(&ctx->stop))
+ tasklet_schedule(&ctx->bh);
+ return HRTIMER_NORESTART;
+}
- spin_unlock(&ctx->mtx);
+static void cdc_ncm_txpath_bh(unsigned long param)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)param;
- if (restart) {
- spin_lock(&ctx->mtx);
+ spin_lock_bh(&ctx->mtx);
+ if (ctx->tx_timer_pending != 0) {
+ ctx->tx_timer_pending--;
cdc_ncm_tx_timeout_start(ctx);
- spin_unlock(&ctx->mtx);
+ spin_unlock_bh(&ctx->mtx);
} else if (ctx->netdev != NULL) {
+ spin_unlock_bh(&ctx->mtx);
+ netif_tx_lock_bh(ctx->netdev);
usbnet_start_xmit(NULL, ctx->netdev);
+ netif_tx_unlock_bh(ctx->netdev);
}
}
@@ -917,7 +927,6 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
struct sk_buff *skb_out;
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- u8 need_timer = 0;
/*
* The Ethernet API we are using does not support transmitting
@@ -929,19 +938,9 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
if (ctx == NULL)
goto error;
- spin_lock(&ctx->mtx);
+ spin_lock_bh(&ctx->mtx);
skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
- if (ctx->tx_curr_skb != NULL)
- need_timer = 1;
-
- /* Start timer, if there is a remaining skb */
- if (need_timer)
- cdc_ncm_tx_timeout_start(ctx);
-
- if (skb_out)
- dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
-
- spin_unlock(&ctx->mtx);
+ spin_unlock_bh(&ctx->mtx);
return skb_out;
error:
@@ -954,108 +953,103 @@ error:
static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
{
struct sk_buff *skb;
- struct cdc_ncm_ctx *ctx;
- int sumlen;
- int actlen;
- int temp;
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ int len;
int nframes;
int x;
int offset;
+ struct usb_cdc_ncm_nth16 *nth16;
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct usb_cdc_ncm_dpe16 *dpe16;
- ctx = (struct cdc_ncm_ctx *)dev->data[0];
if (ctx == NULL)
goto error;
- actlen = skb_in->len;
- sumlen = CDC_NCM_NTB_MAX_SIZE_RX;
-
- if (actlen < (sizeof(ctx->rx_ncm.nth16) + sizeof(ctx->rx_ncm.ndp16))) {
+ if (skb_in->len < (sizeof(struct usb_cdc_ncm_nth16) +
+ sizeof(struct usb_cdc_ncm_ndp16))) {
pr_debug("frame too short\n");
goto error;
}
- memcpy(&(ctx->rx_ncm.nth16), ((u8 *)skb_in->data),
- sizeof(ctx->rx_ncm.nth16));
+ nth16 = (struct usb_cdc_ncm_nth16 *)skb_in->data;
- if (le32_to_cpu(ctx->rx_ncm.nth16.dwSignature) !=
- USB_CDC_NCM_NTH16_SIGN) {
+ if (le32_to_cpu(nth16->dwSignature) != USB_CDC_NCM_NTH16_SIGN) {
pr_debug("invalid NTH16 signature <%u>\n",
- le32_to_cpu(ctx->rx_ncm.nth16.dwSignature));
+ le32_to_cpu(nth16->dwSignature));
goto error;
}
- temp = le16_to_cpu(ctx->rx_ncm.nth16.wBlockLength);
- if (temp > sumlen) {
- pr_debug("unsupported NTB block length %u/%u\n", temp, sumlen);
+ len = le16_to_cpu(nth16->wBlockLength);
+ if (len > ctx->rx_max) {
+ pr_debug("unsupported NTB block length %u/%u\n", len,
+ ctx->rx_max);
goto error;
}
- temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex);
- if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
- pr_debug("invalid DPT16 index\n");
+ if ((ctx->rx_seq + 1) != le16_to_cpu(nth16->wSequence) &&
+ (ctx->rx_seq || le16_to_cpu(nth16->wSequence)) &&
+ !((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth16->wSequence))) {
+ pr_debug("sequence number glitch prev=%d curr=%d\n",
+ ctx->rx_seq, le16_to_cpu(nth16->wSequence));
+ }
+ ctx->rx_seq = le16_to_cpu(nth16->wSequence);
+
+ len = le16_to_cpu(nth16->wNdpIndex);
+ if ((len + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) {
+ pr_debug("invalid DPT16 index <%u>\n",
+ le16_to_cpu(nth16->wNdpIndex));
goto error;
}
- memcpy(&(ctx->rx_ncm.ndp16), ((u8 *)skb_in->data) + temp,
- sizeof(ctx->rx_ncm.ndp16));
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)(((u8 *)skb_in->data) + len);
- if (le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature) !=
- USB_CDC_NCM_NDP16_NOCRC_SIGN) {
+ if (le32_to_cpu(ndp16->dwSignature) != USB_CDC_NCM_NDP16_NOCRC_SIGN) {
pr_debug("invalid DPT16 signature <%u>\n",
- le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature));
+ le32_to_cpu(ndp16->dwSignature));
goto error;
}
- if (le16_to_cpu(ctx->rx_ncm.ndp16.wLength) <
- USB_CDC_NCM_NDP16_LENGTH_MIN) {
+ if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
pr_debug("invalid DPT16 length <%u>\n",
- le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature));
+ le32_to_cpu(ndp16->dwSignature));
goto error;
}
- nframes = ((le16_to_cpu(ctx->rx_ncm.ndp16.wLength) -
+ nframes = ((le16_to_cpu(ndp16->wLength) -
sizeof(struct usb_cdc_ncm_ndp16)) /
sizeof(struct usb_cdc_ncm_dpe16));
nframes--; /* we process NDP entries except for the last one */
- pr_debug("nframes = %u\n", nframes);
-
- temp += sizeof(ctx->rx_ncm.ndp16);
+ len += sizeof(struct usb_cdc_ncm_ndp16);
- if ((temp + nframes * (sizeof(struct usb_cdc_ncm_dpe16))) > actlen) {
+ if ((len + nframes * (sizeof(struct usb_cdc_ncm_dpe16))) >
+ skb_in->len) {
pr_debug("Invalid nframes = %d\n", nframes);
goto error;
}
- if (nframes > CDC_NCM_DPT_DATAGRAMS_MAX) {
- pr_debug("Truncating number of frames from %u to %u\n",
- nframes, CDC_NCM_DPT_DATAGRAMS_MAX);
- nframes = CDC_NCM_DPT_DATAGRAMS_MAX;
- }
-
- memcpy(&(ctx->rx_ncm.dpe16), ((u8 *)skb_in->data) + temp,
- nframes * (sizeof(struct usb_cdc_ncm_dpe16)));
+ dpe16 = (struct usb_cdc_ncm_dpe16 *)(((u8 *)skb_in->data) + len);
- for (x = 0; x < nframes; x++) {
- offset = le16_to_cpu(ctx->rx_ncm.dpe16[x].wDatagramIndex);
- temp = le16_to_cpu(ctx->rx_ncm.dpe16[x].wDatagramLength);
+ for (x = 0; x < nframes; x++, dpe16++) {
+ offset = le16_to_cpu(dpe16->wDatagramIndex);
+ len = le16_to_cpu(dpe16->wDatagramLength);
/*
* CDC NCM ch. 3.7
* All entries after first NULL entry are to be ignored
*/
- if ((offset == 0) || (temp == 0)) {
+ if ((offset == 0) || (len == 0)) {
if (!x)
goto error; /* empty NTB */
break;
}
/* sanity checking */
- if (((offset + temp) > actlen) ||
- (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
+ if (((offset + len) > skb_in->len) ||
+ (len > ctx->rx_max) || (len < ETH_HLEN)) {
pr_debug("invalid frame detected (ignored)"
"offset[%u]=%u, length=%u, skb=%p\n",
- x, offset, temp, skb_in);
+ x, offset, len, skb_in);
if (!x)
goto error;
break;
@@ -1064,9 +1058,9 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
skb = skb_clone(skb_in, GFP_ATOMIC);
if (!skb)
goto error;
- skb->len = temp;
+ skb->len = len;
skb->data = ((u8 *)skb_in->data) + offset;
- skb_set_tail_pointer(skb, temp);
+ skb_set_tail_pointer(skb, len);
usbnet_skb_return(dev, skb);
}
}
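
The cdc_ncm timer rework above replaces the jiffies-based timer_list with an hrtimer that only schedules a tasklet, so the roughly 400 us TX aggregation delay no longer rounds up to a whole jiffy and the real work runs in BH context under spin_lock_bh(). A reduced, hedged sketch of that pattern (hrtimer callback deferring to a tasklet, with a stop flag honoured on teardown); the demo_* names and context struct are illustrative, not the driver's own:

    #include <linux/hrtimer.h>
    #include <linux/interrupt.h>
    #include <linux/ktime.h>
    #include <linux/atomic.h>
    #include <linux/kernel.h>

    struct demo_ctx {
            struct hrtimer timer;
            struct tasklet_struct bh;
            atomic_t stop;
    };

    static enum hrtimer_restart demo_timer_cb(struct hrtimer *t)
    {
            struct demo_ctx *ctx = container_of(t, struct demo_ctx, timer);

            if (!atomic_read(&ctx->stop))
                    tasklet_schedule(&ctx->bh);     /* real work runs in BH */
            return HRTIMER_NORESTART;
    }

    static void demo_bh(unsigned long data)
    {
            /* flush the aggregated TX frame here */
    }

    static void demo_init(struct demo_ctx *ctx)
    {
            hrtimer_init(&ctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            ctx->timer.function = demo_timer_cb;
            tasklet_init(&ctx->bh, demo_bh, (unsigned long)ctx);
            atomic_set(&ctx->stop, 0);
    }

    static void demo_arm(struct demo_ctx *ctx)
    {
            if (!hrtimer_active(&ctx->timer) && !atomic_read(&ctx->stop))
                    hrtimer_start(&ctx->timer,
                                  ktime_set(0, 400 * NSEC_PER_USEC),
                                  HRTIMER_MODE_REL);
    }

    static void demo_teardown(struct demo_ctx *ctx)
    {
            atomic_set(&ctx->stop, 1);      /* no new timer/tasklet runs */
            hrtimer_cancel(&ctx->timer);
            tasklet_kill(&ctx->bh);
    }
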
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index d034d9c42548..df2a2cf35a99 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1098,13 +1098,7 @@ err_fw:
dev_info(&intf->dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask);
dev_info(&intf->dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1));
dev_info(&intf->dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size));
- dev_info(&intf->dev, "Read MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
- (int)kaweth->configuration.hw_addr[0],
- (int)kaweth->configuration.hw_addr[1],
- (int)kaweth->configuration.hw_addr[2],
- (int)kaweth->configuration.hw_addr[3],
- (int)kaweth->configuration.hw_addr[4],
- (int)kaweth->configuration.hw_addr[5]);
+ dev_info(&intf->dev, "Read MAC address %pM\n", kaweth->configuration.hw_addr);
if(!memcmp(&kaweth->configuration.hw_addr,
&bcast_addr,
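
Several drivers in this series drop open-coded "%2.2x:%2.2x:..." MAC formatting in favour of the kernel's %pM printk extension, which prints a colon-separated address from a pointer to the six raw bytes. A short sketch of the idiom (demo_* name is hypothetical):

    #include <linux/device.h>
    #include <linux/if_ether.h>
    #include <linux/types.h>

    /* Print a MAC address via the %pM printk extension. */
    static void demo_print_mac(struct device *dev, const u8 addr[ETH_ALEN])
    {
            dev_info(dev, "MAC address: %pM\n", addr);
    }
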
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index a29aa9cf9f6e..c434b6ba0337 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -239,7 +239,7 @@ static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
return -EBUSY;
if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
ret = mcs7830_hif_set_mac_address(dev, addr->sa_data);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 5d99b8cacd7d..752393092325 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1332,10 +1332,8 @@ static int pegasus_probe(struct usb_interface *intf,
usb_get_dev(dev);
net = alloc_etherdev(sizeof(struct pegasus));
- if (!net) {
- dev_err(&intf->dev, "can't allocate %s\n", "device");
+ if (!net)
goto out;
- }
pegasus = netdev_priv(net);
pegasus->dev_index = dev_index;
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 0710b4ca9252..6dda2fe5b15b 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -894,10 +894,8 @@ static int rtl8150_probe(struct usb_interface *intf,
struct net_device *netdev;
netdev = alloc_etherdev(sizeof(rtl8150_t));
- if (!netdev) {
- err("Out of memory");
+ if (!netdev)
return -ENOMEM;
- }
dev = netdev_priv(netdev);
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 3b017bbd2a22..187d01ccb973 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -615,7 +615,7 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
}
/* no eeprom, or eeprom values are invalid. generate random MAC */
- random_ether_addr(dev->net->dev_addr);
+ eth_hw_addr_random(dev->net);
netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr");
}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index d45520e6dd48..5f19f84d3494 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -614,7 +614,7 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
}
/* no eeprom, or eeprom values are invalid. generate random MAC */
- random_ether_addr(dev->net->dev_addr);
+ eth_hw_addr_random(dev->net);
netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr\n");
}
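
smsc75xx, smsc95xx, veth, virtio_net and the HDLC drivers switch from random_ether_addr() on dev->dev_addr to eth_hw_addr_random(dev); besides generating the address, the helper records that it was randomly assigned so userspace can distinguish it from a factory-programmed MAC. A hedged sketch of the fallback pattern, with the EEPROM-provided address passed in rather than read by a real helper (demo_* name is hypothetical):

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <linux/string.h>

    /* Use the EEPROM address if it is usable, otherwise fall back to a
     * randomly generated, properly marked MAC address.
     */
    static void demo_set_mac(struct net_device *netdev, const u8 *eeprom_mac)
    {
            if (eeprom_mac && is_valid_ether_addr(eeprom_mac)) {
                    memcpy(netdev->dev_addr, eeprom_mac, ETH_ALEN);
                    return;
            }

            /* no/invalid EEPROM address: generate one and mark it random */
            eth_hw_addr_random(netdev);
    }
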
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 59681f01a54e..4b8b52ca09d8 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1336,10 +1336,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
// set up our own records
net = alloc_etherdev(sizeof(*dev));
- if (!net) {
- dbg ("can't kmalloc dev");
+ if (!net)
goto out;
- }
/* netdev_printk() needs this so do it as early as possible */
SET_NETDEV_DEV(net, &udev->dev);
@@ -1537,7 +1535,7 @@ int usbnet_resume (struct usb_interface *intf)
if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
if (!(dev->txq.qlen >= TX_QLEN(dev)))
- netif_start_queue(dev->net);
+ netif_tx_wake_all_queues(dev->net);
tasklet_schedule (&dev->bh);
}
}
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 4a3402898f2a..5852361032c4 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -346,7 +346,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
}
if (tbp[IFLA_ADDRESS] == NULL)
- random_ether_addr(peer->dev_addr);
+ eth_hw_addr_random(peer);
err = register_netdevice(peer);
put_net(net);
@@ -368,7 +368,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
*/
if (tb[IFLA_ADDRESS] == NULL)
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
if (tb[IFLA_IFNAME])
nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4880aa8b4c28..019da012669f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -255,7 +255,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
{
struct virtnet_info *vi = netdev_priv(dev);
- struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats);
+ struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
struct sk_buff *skb;
struct page *page;
struct skb_vnet_hdr *hdr;
@@ -549,7 +549,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
struct sk_buff *skb;
unsigned int len, tot_sgs = 0;
- struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats);
+ struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
@@ -688,8 +688,7 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
unsigned int start;
for_each_possible_cpu(cpu) {
- struct virtnet_stats __percpu *stats
- = per_cpu_ptr(vi->stats, cpu);
+ struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
u64 tpackets, tbytes, rpackets, rbytes;
do {
@@ -1061,7 +1060,7 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
offsetof(struct virtio_net_config, mac),
dev->dev_addr, dev->addr_len) < 0)
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
/* Set up our device-specific information */
vi = netdev_priv(dev);
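
The virtio_net hunks above mostly drop a stray __percpu annotation from pointers already obtained through this_cpu_ptr()/per_cpu_ptr(): those accessors return an ordinary pointer to one CPU's copy of the per-CPU allocation, so only the allocation handle itself carries __percpu. A minimal sketch of that per-CPU counter pattern, under the assumption of a simple stats struct and a caller that runs with preemption/BH disabled (as a TX path does); names are illustrative:

    #include <linux/percpu.h>
    #include <linux/types.h>

    struct demo_stats {
            u64 tx_packets;
            u64 tx_bytes;
    };

    /* one instance per possible CPU; the return value is a __percpu cookie */
    static struct demo_stats __percpu *demo_alloc_stats(void)
    {
            return alloc_percpu(struct demo_stats);
    }

    /* caller assumed to run with preemption or BHs disabled */
    static void demo_count_tx(struct demo_stats __percpu *stats,
                              unsigned int len)
    {
            /* this_cpu_ptr() hands back a plain pointer to this CPU's copy */
            struct demo_stats *s = this_cpu_ptr(stats);

            s->tx_packets++;
            s->tx_bytes += len;
    }
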
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 756c0f5565a5..3f04ba0a5454 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -537,11 +537,8 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
GFP_KERNEL);
- if (!tq->buf_info) {
- printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
- adapter->netdev->name);
+ if (!tq->buf_info)
goto err;
- }
return 0;
@@ -636,7 +633,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
dev_dbg(&adapter->netdev->dev,
"alloc_rx_buf: %d allocated, next2fill %u, next2comp "
- "%u, uncommited %u\n", num_allocated, ring->next2fill,
+ "%u, uncommitted %u\n", num_allocated, ring->next2fill,
ring->next2comp, rq->uncommitted[ring_idx]);
/* so that the device can distinguish a full ring and an empty ring */
@@ -816,22 +813,19 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
if (ctx->mss) { /* TSO */
ctx->eth_ip_hdr_size = skb_transport_offset(skb);
- ctx->l4_hdr_size = ((struct tcphdr *)
- skb_transport_header(skb))->doff * 4;
+ ctx->l4_hdr_size = tcp_hdrlen(skb);
ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
} else {
if (skb->ip_summed == CHECKSUM_PARTIAL) {
ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
if (ctx->ipv4) {
- struct iphdr *iph = (struct iphdr *)
- skb_network_header(skb);
+ const struct iphdr *iph = ip_hdr(skb);
+
if (iph->protocol == IPPROTO_TCP)
- ctx->l4_hdr_size = ((struct tcphdr *)
- skb_transport_header(skb))->doff * 4;
+ ctx->l4_hdr_size = tcp_hdrlen(skb);
else if (iph->protocol == IPPROTO_UDP)
- ctx->l4_hdr_size =
- sizeof(struct udphdr);
+ ctx->l4_hdr_size = sizeof(struct udphdr);
else
ctx->l4_hdr_size = 0;
} else {
@@ -876,14 +870,17 @@ static void
vmxnet3_prepare_tso(struct sk_buff *skb,
struct vmxnet3_tx_ctx *ctx)
{
- struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
+ struct tcphdr *tcph = tcp_hdr(skb);
+
if (ctx->ipv4) {
- struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
+ struct iphdr *iph = ip_hdr(skb);
+
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
IPPROTO_TCP, 0);
} else {
- struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+
tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
IPPROTO_TCP, 0);
}
@@ -1514,11 +1511,9 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
rq->rx_ring[1].size);
bi = kzalloc(sz, GFP_KERNEL);
- if (!bi) {
- printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
- adapter->netdev->name);
+ if (!bi)
goto err;
- }
+
rq->buf_info[0] = bi;
rq->buf_info[1] = bi + rq->rx_ring[0].size;
@@ -2704,8 +2699,8 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
adapter->intr.num_intrs = vectors;
return 0;
} else if (err < 0) {
- printk(KERN_ERR "Failed to enable MSI-X for %s, error"
- " %d\n", adapter->netdev->name, err);
+ netdev_err(adapter->netdev,
+ "Failed to enable MSI-X, error: %d\n", err);
vectors = 0;
} else if (err < vector_threshold) {
break;
@@ -2713,15 +2708,15 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
/* If fails to enable required number of MSI-x vectors
* try enabling minimum number of vectors required.
*/
+ netdev_err(adapter->netdev,
+ "Failed to enable %d MSI-X, trying %d instead\n",
+ vectors, vector_threshold);
vectors = vector_threshold;
- printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
- " %d instead\n", vectors, adapter->netdev->name,
- vector_threshold);
}
}
- printk(KERN_INFO "Number of MSI-X interrupts which can be allocatedi"
- " are lower than min threshold required.\n");
+ netdev_info(adapter->netdev,
+ "Number of MSI-X interrupts which can be allocated are lower than min threshold required.\n");
return err;
}
@@ -2787,8 +2782,9 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
return;
/* If we cannot allocate MSIx vectors use only one rx queue */
- printk(KERN_INFO "Failed to enable MSI-X for %s, error %d."
- "#rx queues : 1, try MSI\n", adapter->netdev->name, err);
+ netdev_info(adapter->netdev,
+ "Failed to enable MSI-X, error %d . Limiting #rx queues to 1, try MSI.\n",
+ err);
adapter->intr.type = VMXNET3_IT_MSI;
}
@@ -2918,11 +2914,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
num_tx_queues, num_rx_queues);
- if (!netdev) {
- printk(KERN_ERR "Failed to alloc ethernet device for adapter "
- "%s\n", pci_name(pdev));
+ if (!netdev)
return -ENOMEM;
- }
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
@@ -2959,8 +2952,6 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
if (adapter->pm_conf == NULL) {
- printk(KERN_ERR "Failed to allocate memory for %s\n",
- pci_name(pdev));
err = -ENOMEM;
goto err_alloc_pm;
}
@@ -2969,8 +2960,6 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
if (adapter->rss_conf == NULL) {
- printk(KERN_ERR "Failed to allocate memory for %s\n",
- pci_name(pdev));
err = -ENOMEM;
goto err_alloc_rss;
}
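
Besides dropping redundant out-of-memory messages, the vmxnet3 changes replace open-coded casts of skb_transport_header()/skb_network_header() with the ip_hdr(), ipv6_hdr(), tcp_hdr() and tcp_hdrlen() accessors when preparing TSO, where the TCP checksum field is pre-seeded with the pseudo-header sum. A condensed sketch of that preparation step, assuming the skb's network and transport headers are already set; the demo_* name is hypothetical:

    #include <linux/in.h>
    #include <linux/ip.h>
    #include <linux/ipv6.h>
    #include <linux/skbuff.h>
    #include <linux/tcp.h>
    #include <net/checksum.h>
    #include <net/ip6_checksum.h>

    /* Zero the IPv4 header checksum and seed tcph->check with the
     * pseudo-header sum, mirroring vmxnet3_prepare_tso() above.
     */
    static void demo_prepare_tso(struct sk_buff *skb, bool ipv4)
    {
            struct tcphdr *tcph = tcp_hdr(skb);

            if (ipv4) {
                    struct iphdr *iph = ip_hdr(skb);

                    iph->check = 0;
                    tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                     0, IPPROTO_TCP, 0);
            } else {
                    struct ipv6hdr *ip6h = ipv6_hdr(skb);

                    tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                                   0, IPPROTO_TCP, 0);
            }
    }
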
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 54f995f4a5a3..09a50751763b 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -325,10 +325,8 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
- if (card == NULL) {
- pr_err("unable to allocate memory\n");
+ if (card == NULL)
return -ENOBUFS;
- }
card->dev = alloc_hdlcdev(card);
if (!card->dev) {
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 058e1697c174..fe8d060d8fff 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -903,10 +903,8 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
int i, ret = -ENOMEM;
root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL);
- if (!root) {
- pr_err("can't allocate data\n");
+ if (!root)
goto err_out;
- }
for (i = 0; i < dev_per_card; i++) {
root[i].dev = alloc_hdlcdev(root + i);
@@ -915,10 +913,8 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
}
ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL);
- if (!ppriv) {
- pr_err("can't allocate private data\n");
+ if (!ppriv)
goto err_free_dev;
- }
ppriv->root = root;
spin_lock_init(&ppriv->lock);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index eb2028187fbe..7c6cb4f31798 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1087,7 +1087,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
}
if (type == ARPHRD_ETHER)
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
else {
*(__be16*)dev->dev_addr = htons(dlci);
dlci_to_q922(dev->broadcast, dlci);
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index 05c9b0b96239..3ab72b3082de 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -101,7 +101,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
old_qlen = dev->tx_queue_len;
ether_setup(dev);
dev->tx_queue_len = old_qlen;
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
netif_dormant_off(dev);
return 0;
}
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index b7f2358d23be..76a8a4a522e9 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -497,7 +497,6 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
data = kmalloc(xc.len, GFP_KERNEL);
if (!data) {
- printk(KERN_WARNING "%s: Failed to allocate memory for copy\n", dev->name);
ret = -ENOMEM;
break;
}
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 5129ad514d26..315bf09d6a20 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -358,10 +358,8 @@ static int __init n2_run(unsigned long io, unsigned long irq,
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
- if (card == NULL) {
- pr_err("unable to allocate memory\n");
+ if (card == NULL)
return -ENOBUFS;
- }
card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index c49c1b3c7aad..5fe246e060d7 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -320,7 +320,6 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL) {
- pr_err("unable to allocate memory\n");
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index 1ce21163c776..9659fcaa34ed 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -299,7 +299,6 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL) {
- pr_err("unable to allocate memory\n");
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 44b707197258..feb7541b33fb 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -604,7 +604,6 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
alloc_size = sizeof(card_t) + ports * sizeof(port_t);
card = kzalloc(alloc_size, GFP_KERNEL);
if (card == NULL) {
- pr_err("%s: unable to allocate memory\n", pci_name(pdev));
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 8a10bb730d5a..e862369b4a6d 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -786,10 +786,8 @@ static int __init init_x25_asy(void)
x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device *),
GFP_KERNEL);
- if (!x25_asy_devs) {
- pr_warn("Can't allocate x25_asy_ctrls[] array! Uaargh! (-> No X.25 available)\n");
+ if (!x25_asy_devs)
return -ENOMEM;
- }
return tty_register_ldisc(N_X25, &x25_ldisc);
}
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 1c008c61b95c..ddc061dd150c 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1869,7 +1869,7 @@ static int readStatsRid(struct airo_info*ai, StatsRid *sr, int rid, int lock)
static void try_auto_wep(struct airo_info *ai)
{
- if (auto_wep && !(ai->flags & FLAG_RADIO_DOWN)) {
+ if (auto_wep && !test_bit(FLAG_RADIO_DOWN, &ai->flags)) {
ai->expires = RUN_AT(3*HZ);
wake_up_interruptible(&ai->thr_wait);
}
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index efc01110dc34..c54b7d37bff1 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -174,28 +174,24 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry);
void ath_hw_cycle_counters_update(struct ath_common *common);
int32_t ath_hw_get_listen_time(struct ath_common *common);
-extern __printf(2, 3) void ath_printk(const char *level, const char *fmt, ...);
-
-#define _ath_printk(level, common, fmt, ...) \
-do { \
- __always_unused struct ath_common *unused = common; \
- ath_printk(level, fmt, ##__VA_ARGS__); \
-} while (0)
+__printf(3, 4)
+void ath_printk(const char *level, const struct ath_common *common,
+ const char *fmt, ...);
#define ath_emerg(common, fmt, ...) \
- _ath_printk(KERN_EMERG, common, fmt, ##__VA_ARGS__)
+ ath_printk(KERN_EMERG, common, fmt, ##__VA_ARGS__)
#define ath_alert(common, fmt, ...) \
- _ath_printk(KERN_ALERT, common, fmt, ##__VA_ARGS__)
+ ath_printk(KERN_ALERT, common, fmt, ##__VA_ARGS__)
#define ath_crit(common, fmt, ...) \
- _ath_printk(KERN_CRIT, common, fmt, ##__VA_ARGS__)
+ ath_printk(KERN_CRIT, common, fmt, ##__VA_ARGS__)
#define ath_err(common, fmt, ...) \
- _ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)
+ ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)
#define ath_warn(common, fmt, ...) \
- _ath_printk(KERN_WARNING, common, fmt, ##__VA_ARGS__)
+ ath_printk(KERN_WARNING, common, fmt, ##__VA_ARGS__)
#define ath_notice(common, fmt, ...) \
- _ath_printk(KERN_NOTICE, common, fmt, ##__VA_ARGS__)
+ ath_printk(KERN_NOTICE, common, fmt, ##__VA_ARGS__)
#define ath_info(common, fmt, ...) \
- _ath_printk(KERN_INFO, common, fmt, ##__VA_ARGS__)
+ ath_printk(KERN_INFO, common, fmt, ##__VA_ARGS__)
/**
* enum ath_debug_level - atheros wireless debug level
@@ -256,7 +252,7 @@ enum ATH_DEBUG {
#define ath_dbg(common, dbg_mask, fmt, ...) \
do { \
if ((common)->debug_mask & ATH_DBG_##dbg_mask) \
- _ath_printk(KERN_DEBUG, common, fmt, ##__VA_ARGS__); \
+ ath_printk(KERN_DEBUG, common, fmt, ##__VA_ARGS__); \
} while (0)
#define ATH_DBG_WARN(foo, arg...) WARN(foo, arg)
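
ath_printk() now takes the struct ath_common pointer directly instead of hiding it behind the _ath_printk() wrapper macro, and the __printf(3, 4) attribute lets the compiler type-check the format arguments at every call site. A stand-alone sketch of the same pattern, a printf-checked logging helper plus thin per-level macros; the demo_* names and the exact message prefix are assumptions, not the ath code:

    #include <stdarg.h>
    #include <linux/kernel.h>
    #include <linux/printk.h>

    struct demo_common {
            const char *name;
    };

    __printf(3, 4)
    static void demo_printk(const char *level,
                            const struct demo_common *common,
                            const char *fmt, ...)
    {
            struct va_format vaf;
            va_list args;

            va_start(args, fmt);
            vaf.fmt = fmt;
            vaf.va = &args;
            /* %pV expands the caller's format/args inside one printk() */
            printk("%s%s: %pV", level, common->name, &vaf);
            va_end(args);
    }

    #define demo_err(common, fmt, ...) \
            demo_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)
    #define demo_info(common, fmt, ...) \
            demo_printk(KERN_INFO, common, fmt, ##__VA_ARGS__)
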
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index ee7ea572b065..8faa129da5a0 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -140,23 +140,23 @@ static int ath_ahb_probe(struct platform_device *pdev)
if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
/* Enable WMAC AHB arbitration */
- reg = __raw_readl((void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
+ reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
reg |= AR5K_AR2315_AHB_ARB_CTL_WLAN;
- __raw_writel(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
+ iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
/* Enable global WMAC swapping */
- reg = __raw_readl((void __iomem *) AR5K_AR2315_BYTESWAP);
+ reg = ioread32((void __iomem *) AR5K_AR2315_BYTESWAP);
reg |= AR5K_AR2315_BYTESWAP_WMAC;
- __raw_writel(reg, (void __iomem *) AR5K_AR2315_BYTESWAP);
+ iowrite32(reg, (void __iomem *) AR5K_AR2315_BYTESWAP);
} else {
/* Enable WMAC DMA access (assuming 5312 or 231x*/
/* TODO: check other platforms */
- reg = __raw_readl((void __iomem *) AR5K_AR5312_ENABLE);
+ reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE);
if (to_platform_device(ah->dev)->id == 0)
reg |= AR5K_AR5312_ENABLE_WLAN0;
else
reg |= AR5K_AR5312_ENABLE_WLAN1;
- __raw_writel(reg, (void __iomem *) AR5K_AR5312_ENABLE);
+ iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE);
/*
* On a dual-band AR5312, the multiband radio is only
@@ -203,17 +203,17 @@ static int ath_ahb_remove(struct platform_device *pdev)
if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
/* Disable WMAC AHB arbitration */
- reg = __raw_readl((void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
+ reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
reg &= ~AR5K_AR2315_AHB_ARB_CTL_WLAN;
- __raw_writel(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
+ iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
} else {
/*Stop DMA access */
- reg = __raw_readl((void __iomem *) AR5K_AR5312_ENABLE);
+ reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE);
if (to_platform_device(ah->dev)->id == 0)
reg &= ~AR5K_AR5312_ENABLE_WLAN0;
else
reg &= ~AR5K_AR5312_ENABLE_WLAN1;
- __raw_writel(reg, (void __iomem *) AR5K_AR5312_ENABLE);
+ iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE);
}
ath5k_deinit_ah(ah);
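
The ath5k AHB/SoC paths above move from __raw_readl()/__raw_writel() to ioread32()/iowrite32(), i.e. from raw, unordered accesses to the generic MMIO accessors. A short sketch of the read-modify-write idiom those hunks use (the demo_* helper is illustrative):

    #include <linux/io.h>
    #include <linux/types.h>

    /* Set or clear 'mask' in a memory-mapped 32-bit register. */
    static void demo_rmw32(void __iomem *reg, u32 mask, bool set)
    {
            u32 val = ioread32(reg);

            if (set)
                    val |= mask;
            else
                    val &= ~mask;

            iowrite32(val, reg);
    }
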
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index bf674161a217..35e93704c4ef 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -257,7 +257,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
"beacon RSSI high");
/* only OFDM: beacon RSSI is high, we can disable ODFM weak
* signal detection */
- if (ofdm_trigger && as->ofdm_weak_sig == true) {
+ if (ofdm_trigger && as->ofdm_weak_sig) {
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
ath5k_ani_set_spur_immunity_level(ah, 0);
return;
@@ -272,7 +272,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
* but can raise firstep level */
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"beacon RSSI mid");
- if (ofdm_trigger && as->ofdm_weak_sig == false)
+ if (ofdm_trigger && !as->ofdm_weak_sig)
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
@@ -282,7 +282,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
* detect and zero firstep level to maximize CCK sensitivity */
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"beacon RSSI low, 2GHz");
- if (ofdm_trigger && as->ofdm_weak_sig == true)
+ if (ofdm_trigger && as->ofdm_weak_sig)
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
if (as->firstep_level > 0)
ath5k_ani_set_firstep_level(ah, 0);
@@ -326,7 +326,7 @@ ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
} else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
/* beacon RSSI is mid-range: turn on ODFM weak signal
* detection and next, lower firstep level */
- if (as->ofdm_weak_sig == false) {
+ if (!as->ofdm_weak_sig) {
ath5k_ani_set_ofdm_weak_signal_detection(ah,
true);
return;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index c2b2518c2ecd..8d434b8f5855 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1320,6 +1320,7 @@ struct ath5k_hw {
struct ieee80211_vif *bslot[ATH_BCBUF];
u16 num_ap_vifs;
u16 num_adhoc_vifs;
+ u16 num_mesh_vifs;
unsigned int bhalq, /* SW q for outgoing beacons */
bmisscount, /* missed beacon transmits */
bintval, /* beacon interval in TU */
@@ -1656,12 +1657,12 @@ static inline void __iomem *ath5k_ahb_reg(struct ath5k_hw *ah, u16 reg)
static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
{
- return __raw_readl(ath5k_ahb_reg(ah, reg));
+ return ioread32(ath5k_ahb_reg(ah, reg));
}
static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
{
- __raw_writel(val, ath5k_ahb_reg(ah, reg));
+ iowrite32(val, ath5k_ahb_reg(ah, reg));
}
#else
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index d366dadcf86e..0e643b016b32 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -80,7 +80,7 @@ static bool modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
-static int ath5k_modparam_no_hw_rfkill_switch;
+static bool ath5k_modparam_no_hw_rfkill_switch;
module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
bool, S_IRUGO);
MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");
@@ -1867,7 +1867,8 @@ ath5k_beacon_send(struct ath5k_hw *ah)
ah->bmisscount = 0;
}
- if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs > 1) ||
+ if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs +
+ ah->num_mesh_vifs > 1) ||
ah->opmode == NL80211_IFTYPE_MESH_POINT) {
u64 tsf = ath5k_hw_get_tsf64(ah);
u32 tsftu = TSF_TO_TU(tsf);
@@ -1952,7 +1953,8 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
u64 hw_tsf;
intval = ah->bintval & AR5K_BEACON_PERIOD;
- if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs > 1) {
+ if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs
+ + ah->num_mesh_vifs > 1) {
intval /= ATH_BCBUF; /* staggered multi-bss beacons */
if (intval < 15)
ATH5K_WARN(ah, "intval %u is too low, min 15\n",
@@ -2330,15 +2332,6 @@ ath5k_calibrate_work(struct work_struct *work)
"got new rfgain, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
}
-
- /* TODO: On full calibration we should stop TX here,
- * so that it doesn't interfere (mostly due to gain_f
- * calibration that messes with tx packets -see phy.c).
- *
- * NOTE: Stopping the queues from above is not enough
- * to stop TX but saves us from disconecting (at least
- * we don't lose packets). */
- ieee80211_stop_queues(ah->hw);
} else
ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;
@@ -2353,10 +2346,9 @@ ath5k_calibrate_work(struct work_struct *work)
ah->curchan->center_freq));
/* Clear calibration flags */
- if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
- ieee80211_wake_queues(ah->hw);
+ if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL)
ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
- } else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
+ else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
}
@@ -2442,6 +2434,9 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
+ /* SW support for IBSS_RSN is provided by mac80211 */
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
/* both antennas can be configured as RX or TX */
hw->wiphy->available_antennas_tx = 0x3;
hw->wiphy->available_antennas_rx = 0x3;
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 6ed4c0717e3e..5c5329955414 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -134,6 +134,8 @@ ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
ah->num_ap_vifs++;
else if (avf->opmode == NL80211_IFTYPE_ADHOC)
ah->num_adhoc_vifs++;
+ else if (avf->opmode == NL80211_IFTYPE_MESH_POINT)
+ ah->num_mesh_vifs++;
}
/* Any MAC address is fine, all others are included through the
@@ -175,6 +177,8 @@ ath5k_remove_interface(struct ieee80211_hw *hw,
ah->num_ap_vifs--;
else if (avf->opmode == NL80211_IFTYPE_ADHOC)
ah->num_adhoc_vifs--;
+ else if (avf->opmode == NL80211_IFTYPE_MESH_POINT)
+ ah->num_mesh_vifs--;
ath5k_update_bssid_mask_and_opmode(ah, NULL);
mutex_unlock(&ah->lock);
@@ -483,6 +487,14 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (ath5k_modparam_nohwcrypt)
return -EOPNOTSUPP;
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ /* don't program group keys when using IBSS_RSN */
+ return -EOPNOTSUPP;
+ }
+
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index e1f8613426a9..3a2845489a1b 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1871,31 +1871,15 @@ ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
ret = 0;
}
- /* On full calibration do an AGC calibration and
- * request a PAPD probe for gainf calibration if
- * needed */
- if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
-
- AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
- AR5K_PHY_AGCCTL_CAL);
-
- ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
- AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF,
- 0, false);
- if (ret) {
- ATH5K_ERR(ah,
- "gain calibration timeout (%uMHz)\n",
- channel->center_freq);
- }
-
- if ((ah->ah_radio == AR5K_RF5111 ||
- ah->ah_radio == AR5K_RF5112)
- && (channel->hw_value != AR5K_MODE_11B))
- ath5k_hw_request_rfgain_probe(ah);
- }
-
- /* Update noise floor
- * XXX: Only do this after AGC calibration */
+ /* On full calibration request a PAPD probe for
+ * gainf calibration if needed */
+ if ((ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+ (ah->ah_radio == AR5K_RF5111 ||
+ ah->ah_radio == AR5K_RF5112) &&
+ channel->hw_value != AR5K_MODE_11B)
+ ath5k_hw_request_rfgain_probe(ah);
+
+ /* Update noise floor */
if (!(ah->ah_cal_mask & AR5K_CALIBRATION_NF))
ath5k_hw_update_noise_floor(ah);
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 250db40b751d..200f165c0c6d 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -473,14 +473,14 @@ ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
}
/* Put BB/MAC into reset */
- regval = __raw_readl(reg);
- __raw_writel(regval | val, reg);
- regval = __raw_readl(reg);
+ regval = ioread32(reg);
+ iowrite32(regval | val, reg);
+ regval = ioread32(reg);
usleep_range(100, 150);
/* Bring BB/MAC out of reset */
- __raw_writel(regval & ~val, reg);
- regval = __raw_readl(reg);
+ iowrite32(regval & ~val, reg);
+ regval = ioread32(reg);
/*
* Reset configuration register (for hw byte-swap). Note that this
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
index 3d5f8be20eac..d755a5e7ed20 100644
--- a/drivers/net/wireless/ath/ath6kl/Kconfig
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -1,12 +1,29 @@
config ATH6KL
- tristate "Atheros ath6kl support"
+ tristate "Atheros mobile chipsets support"
+
+config ATH6KL_SDIO
+ tristate "Atheros ath6kl SDIO support"
+ depends on ATH6KL
depends on MMC
depends on CFG80211
---help---
This module adds support for wireless adapters based on
- Atheros AR6003 chipset running over SDIO. If you choose to
- build it as a module, it will be called ath6kl. Pls note
- that AR6002 and AR6001 are not supported by this driver.
+ Atheros AR6003 and AR6004 chipsets running over SDIO. If you
+ choose to build it as a module, it will be called ath6kl_sdio.
+ Please note that AR6002 and AR6001 are not supported by this
+ driver.
+
+config ATH6KL_USB
+ tristate "Atheros ath6kl USB support"
+ depends on ATH6KL
+ depends on USB
+ depends on CFG80211
+ depends on EXPERIMENTAL
+ ---help---
+ This module adds support for wireless adapters based on
+ Atheros AR6004 chipset running over USB. This is still under
+ implementation and it isn't functional. If you choose to
+ build it as a module, it will be called ath6kl_usb.
config ATH6KL_DEBUG
bool "Atheros ath6kl debugging"
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index 707069303550..85746c3eb027 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -1,5 +1,6 @@
#------------------------------------------------------------------------------
-# Copyright (c) 2004-2010 Atheros Communications Inc.
+# Copyright (c) 2004-2011 Atheros Communications Inc.
+# Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
# All rights reserved.
#
#
@@ -21,17 +22,21 @@
# Author(s): ="Atheros"
#------------------------------------------------------------------------------
-obj-$(CONFIG_ATH6KL) := ath6kl.o
-ath6kl-y += debug.o
-ath6kl-y += hif.o
-ath6kl-y += htc.o
-ath6kl-y += bmi.o
-ath6kl-y += cfg80211.o
-ath6kl-y += init.o
-ath6kl-y += main.o
-ath6kl-y += txrx.o
-ath6kl-y += wmi.o
-ath6kl-y += sdio.o
-ath6kl-$(CONFIG_NL80211_TESTMODE) += testmode.o
+obj-$(CONFIG_ATH6KL) += ath6kl_core.o
+ath6kl_core-y += debug.o
+ath6kl_core-y += hif.o
+ath6kl_core-y += htc.o
+ath6kl_core-y += bmi.o
+ath6kl_core-y += cfg80211.o
+ath6kl_core-y += init.o
+ath6kl_core-y += main.o
+ath6kl_core-y += txrx.o
+ath6kl_core-y += wmi.o
+ath6kl_core-y += core.o
+ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
-ccflags-y += -D__CHECK_ENDIAN__
+obj-$(CONFIG_ATH6KL_SDIO) += ath6kl_sdio.o
+ath6kl_sdio-y += sdio.o
+
+obj-$(CONFIG_ATH6KL_USB) += ath6kl_usb.o
+ath6kl_usb-y += usb.o
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
index bce3575c310a..334dbd834b3a 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.c
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -57,8 +58,14 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
return ret;
}
- ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version,
- sizeof(targ_info->version));
+ if (ar->hif_type == ATH6KL_HIF_TYPE_USB) {
+ ret = ath6kl_hif_bmi_read(ar, (u8 *)targ_info,
+ sizeof(*targ_info));
+ } else {
+ ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version,
+ sizeof(targ_info->version));
+ }
+
if (ret) {
ath6kl_err("Unable to recv target info: %d\n", ret);
return ret;
@@ -99,7 +106,7 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
}
ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n",
- targ_info->version, targ_info->type);
+ targ_info->version, targ_info->type);
return 0;
}
@@ -186,7 +193,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
memset(ar->bmi.cmd_buf, 0, ar->bmi.max_data_size + header);
ath6kl_dbg(ATH6KL_DBG_BMI,
- "bmi write memory: addr: 0x%x, len: %d\n", addr, len);
+ "bmi write memory: addr: 0x%x, len: %d\n", addr, len);
len_remain = len;
while (len_remain) {
@@ -428,7 +435,7 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
offset += sizeof(tx_len);
memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain],
- tx_len);
+ tx_len);
offset += tx_len;
ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.h b/drivers/net/wireless/ath/ath6kl/bmi.h
index f1ca6812456d..18fdd69e1f71 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.h
+++ b/drivers/net/wireless/ath/ath6kl/bmi.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -222,6 +223,29 @@ struct ath6kl_bmi_target_info {
__le32 type; /* target type */
} __packed;
+#define ath6kl_bmi_write_hi32(ar, item, val) \
+ ({ \
+ u32 addr; \
+ __le32 v; \
+ \
+ addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item)); \
+ v = cpu_to_le32(val); \
+ ath6kl_bmi_write(ar, addr, (u8 *) &v, sizeof(v)); \
+ })
+
+#define ath6kl_bmi_read_hi32(ar, item, val) \
+ ({ \
+ u32 addr, *check_type = val; \
+ __le32 tmp; \
+ int ret; \
+ \
+ (void) (check_type == val); \
+ addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item)); \
+ ret = ath6kl_bmi_read(ar, addr, (u8 *) &tmp, 4); \
+ *val = le32_to_cpu(tmp); \
+ ret; \
+ })
+
int ath6kl_bmi_init(struct ath6kl *ar);
void ath6kl_bmi_cleanup(struct ath6kl *ar);
void ath6kl_bmi_reset(struct ath6kl *ar);
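
The ath6kl_bmi_write_hi32()/ath6kl_bmi_read_hi32() macros added above use GNU statement expressions: the ({ ... }) block behaves like a function call whose value is the last expression, and the (void)(check_type == val) line is a compile-time check that the caller really passed a u32 pointer. A generic, hedged sketch of the same macro shape (not ath6kl's API; it always evaluates to 0 for brevity, whereas the real macro propagates the read's return code):

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Read a little-endian u32 from 'src' into *dst; evaluates to 0.
     * The pointer comparison only compiles if 'dst' is a u32 pointer.
     */
    #define demo_get_le32(dst, src)                                 \
            ({                                                      \
                    u32 *check = (dst);                             \
                    __le32 raw;                                     \
                                                                    \
                    (void) (check == (dst));                        \
                    memcpy(&raw, (src), sizeof(raw));               \
                    *(dst) = le32_to_cpu(raw);                      \
                    0;                                              \
            })
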
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 6c59a217b1a1..00d38952b5fb 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -15,6 +16,8 @@
*/
#include <linux/moduleparam.h>
+#include <linux/inetdevice.h>
+#include <linux/export.h>
#include "core.h"
#include "cfg80211.h"
@@ -22,10 +25,6 @@
#include "hif-ops.h"
#include "testmode.h"
-static unsigned int ath6kl_p2p;
-
-module_param(ath6kl_p2p, uint, 0644);
-
#define RATETAB_ENT(_rate, _rateid, _flags) { \
.bitrate = (_rate), \
.flags = (_flags), \
@@ -70,6 +69,10 @@ static struct ieee80211_rate ath6kl_rates[] = {
#define ath6kl_g_rates (ath6kl_rates + 0)
#define ath6kl_g_rates_size 12
+#define ath6kl_g_htcap (IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
+ IEEE80211_HT_CAP_SGI_20 | \
+ IEEE80211_HT_CAP_SGI_40)
+
static struct ieee80211_channel ath6kl_2ghz_channels[] = {
CHAN2G(1, 2412, 0),
CHAN2G(2, 2417, 0),
@@ -114,6 +117,8 @@ static struct ieee80211_supported_band ath6kl_band_2ghz = {
.channels = ath6kl_2ghz_channels,
.n_bitrates = ath6kl_g_rates_size,
.bitrates = ath6kl_g_rates,
+ .ht_cap.cap = ath6kl_g_htcap,
+ .ht_cap.ht_supported = true,
};
static struct ieee80211_supported_band ath6kl_band_5ghz = {
@@ -121,6 +126,8 @@ static struct ieee80211_supported_band ath6kl_band_5ghz = {
.channels = ath6kl_5ghz_a_channels,
.n_bitrates = ath6kl_a_rates_size,
.bitrates = ath6kl_a_rates,
+ .ht_cap.cap = ath6kl_g_htcap,
+ .ht_cap.ht_supported = true,
};
#define CCKM_KRK_CIPHER_SUITE 0x004096ff /* use for KRK */
@@ -196,7 +203,7 @@ static int ath6kl_set_auth_type(struct ath6kl_vif *vif,
break;
default:
- ath6kl_err("%s: 0x%x not spported\n", __func__, auth_type);
+ ath6kl_err("%s: 0x%x not supported\n", __func__, auth_type);
return -ENOTSUPP;
}
@@ -383,7 +390,7 @@ static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
return false;
if (ar->ibss_if_active || ((type == NL80211_IFTYPE_ADHOC) &&
- ar->num_vif))
+ ar->num_vif))
return false;
if (type == NL80211_IFTYPE_STATION ||
@@ -409,6 +416,12 @@ static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
return false;
}
+static bool ath6kl_is_tx_pending(struct ath6kl *ar)
+{
+ return ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0;
+}
+
+
static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
{
@@ -416,6 +429,7 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct ath6kl_vif *vif = netdev_priv(dev);
int status;
u8 nw_subtype = (ar->p2p) ? SUBTYPE_P2PDEV : SUBTYPE_NONE;
+ u16 interval;
ath6kl_cfg80211_sscan_disable(vif);
@@ -452,8 +466,8 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
* sleep until the command queue drains
*/
wait_event_interruptible_timeout(ar->event_wq,
- ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0,
- WMI_TIMEOUT);
+ ath6kl_is_tx_pending(ar),
+ WMI_TIMEOUT);
if (signal_pending(current)) {
ath6kl_err("cmd queue drain timeout\n");
up(&ar->sem);
@@ -461,13 +475,13 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
}
}
- if (sme->ie && (sme->ie_len > 0)) {
- status = ath6kl_set_assoc_req_ies(vif, sme->ie, sme->ie_len);
- if (status) {
- up(&ar->sem);
- return status;
- }
- } else
+ status = ath6kl_set_assoc_req_ies(vif, sme->ie, sme->ie_len);
+ if (status) {
+ up(&ar->sem);
+ return status;
+ }
+
+ if (sme->ie == NULL || sme->ie_len == 0)
ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG;
if (test_bit(CONNECTED, &vif->flags) &&
@@ -523,8 +537,7 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
(vif->prwise_crypto == WEP_CRYPT)) {
struct ath6kl_key *key = NULL;
- if (sme->key_idx < WMI_MIN_KEY_INDEX ||
- sme->key_idx > WMI_MAX_KEY_INDEX) {
+ if (sme->key_idx > WMI_MAX_KEY_INDEX) {
ath6kl_err("key index %d out of bounds\n",
sme->key_idx);
up(&ar->sem);
@@ -549,7 +562,7 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
if (!ar->usr_bss_filter) {
clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
if (ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
- ALL_BSS_FILTER, 0) != 0) {
+ ALL_BSS_FILTER, 0) != 0) {
ath6kl_err("couldn't set bss filtering\n");
up(&ar->sem);
return -EIO;
@@ -571,6 +584,20 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
vif->grp_crypto_len, vif->ch_hint);
vif->reconnect_flag = 0;
+
+ if (vif->nw_type == INFRA_NETWORK) {
+ interval = max_t(u16, vif->listen_intvl_t,
+ ATH6KL_MAX_WOW_LISTEN_INTL);
+ status = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
+ interval,
+ 0);
+ if (status) {
+ ath6kl_err("couldn't set listen intervel\n");
+ up(&ar->sem);
+ return status;
+ }
+ }
+
status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
vif->dot11_auth_mode, vif->auth_mode,
vif->prwise_crypto,
@@ -593,8 +620,8 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
}
if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
- ((vif->auth_mode == WPA_PSK_AUTH)
- || (vif->auth_mode == WPA2_PSK_AUTH))) {
+ ((vif->auth_mode == WPA_PSK_AUTH) ||
+ (vif->auth_mode == WPA2_PSK_AUTH))) {
mod_timer(&vif->disconnect_timer,
jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
}
@@ -605,11 +632,13 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
-static int ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
- enum network_type nw_type,
- const u8 *bssid,
- struct ieee80211_channel *chan,
- const u8 *beacon_ie, size_t beacon_ie_len)
+static struct cfg80211_bss *
+ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
+ enum network_type nw_type,
+ const u8 *bssid,
+ struct ieee80211_channel *chan,
+ const u8 *beacon_ie,
+ size_t beacon_ie_len)
{
struct ath6kl *ar = vif->ar;
struct cfg80211_bss *bss;
@@ -638,7 +667,7 @@ static int ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
*/
ie = kmalloc(2 + vif->ssid_len + beacon_ie_len, GFP_KERNEL);
if (ie == NULL)
- return -ENOMEM;
+ return NULL;
ie[0] = WLAN_EID_SSID;
ie[1] = vif->ssid_len;
memcpy(ie + 2, vif->ssid, vif->ssid_len);
@@ -652,15 +681,9 @@ static int ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
"cfg80211\n", bssid);
kfree(ie);
} else
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss "
- "entry\n");
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n");
- if (bss == NULL)
- return -ENOMEM;
-
- cfg80211_put_bss(bss);
-
- return 0;
+ return bss;
}
void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
@@ -672,6 +695,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
{
struct ieee80211_channel *chan;
struct ath6kl *ar = vif->ar;
+ struct cfg80211_bss *bss;
/* capinfo + listen interval */
u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16);
@@ -712,8 +736,9 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
chan = ieee80211_get_channel(ar->wiphy, (int) channel);
- if (ath6kl_add_bss_if_needed(vif, nw_type, bssid, chan, assoc_info,
- beacon_ie_len) < 0) {
+ bss = ath6kl_add_bss_if_needed(vif, nw_type, bssid, chan,
+ assoc_info, beacon_ie_len);
+ if (!bss) {
ath6kl_err("could not add cfg80211 bss entry\n");
return;
}
@@ -722,6 +747,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
nw_type & ADHOC_CREATOR ? "creator" : "joiner");
cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
+ cfg80211_put_bss(bss);
return;
}
@@ -732,11 +758,11 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
assoc_req_ie, assoc_req_len,
assoc_resp_ie, assoc_resp_len,
WLAN_STATUS_SUCCESS, GFP_KERNEL);
+ cfg80211_put_bss(bss);
} else if (vif->sme_state == SME_CONNECTED) {
/* inform roam event to cfg80211 */
- cfg80211_roamed(vif->ndev, chan, bssid,
- assoc_req_ie, assoc_req_len,
- assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
+ cfg80211_roamed_bss(vif->ndev, bss, assoc_req_ie, assoc_req_len,
+ assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
}
}
@@ -828,13 +854,13 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
if (vif->sme_state == SME_CONNECTING) {
cfg80211_connect_result(vif->ndev,
- bssid, NULL, 0,
- NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
+ bssid, NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
} else if (vif->sme_state == SME_CONNECTED) {
cfg80211_disconnected(vif->ndev, reason,
- NULL, 0, GFP_KERNEL);
+ NULL, 0, GFP_KERNEL);
}
vif->sme_state = SME_DISCONNECTED;
@@ -880,19 +906,14 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
request->ssids[i].ssid);
}
- /*
- * FIXME: we should clear the IE in fw if it's not set so just
- * remove the check altogether
- */
- if (request->ie) {
- ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
- WMI_FRAME_PROBE_REQ,
- request->ie, request->ie_len);
- if (ret) {
- ath6kl_err("failed to set Probe Request appie for "
- "scan");
- return ret;
- }
+ /* this also clears IE in fw if it's not set */
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_PROBE_REQ,
+ request->ie, request->ie_len);
+ if (ret) {
+ ath6kl_err("failed to set Probe Request appie for "
+ "scan");
+ return ret;
}
/*
@@ -921,7 +942,7 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
force_fg_scan = 1;
if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
- ar->fw_capabilities)) {
+ ar->fw_capabilities)) {
/*
* If capable of doing P2P mgmt operations using
* station interface, send additional information like
@@ -930,14 +951,17 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
*/
ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx,
WMI_LONG_SCAN, force_fg_scan,
- false, 0, 0, n_channels,
- channels, request->no_cck,
+ false, 0,
+ ATH6KL_FG_SCAN_INTERVAL,
+ n_channels, channels,
+ request->no_cck,
request->rates);
} else {
ret = ath6kl_wmi_startscan_cmd(ar->wmi, vif->fw_vif_idx,
WMI_LONG_SCAN, force_fg_scan,
- false, 0, 0, n_channels,
- channels);
+ false, 0,
+ ATH6KL_FG_SCAN_INTERVAL,
+ n_channels, channels);
}
if (ret)
ath6kl_err("wmi_startscan_cmd failed\n");
@@ -984,6 +1008,7 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
struct ath6kl *ar = ath6kl_priv(ndev);
struct ath6kl_vif *vif = netdev_priv(ndev);
struct ath6kl_key *key = NULL;
+ int seq_len;
u8 key_usage;
u8 key_type;
@@ -997,7 +1022,7 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
params->key);
}
- if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ if (key_index > WMI_MAX_KEY_INDEX) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: key index %d out of bounds\n", __func__,
key_index);
@@ -1012,23 +1037,21 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
else
key_usage = GROUP_USAGE;
- if (params) {
- int seq_len = params->seq_len;
- if (params->cipher == WLAN_CIPHER_SUITE_SMS4 &&
- seq_len > ATH6KL_KEY_SEQ_LEN) {
- /* Only first half of the WPI PN is configured */
- seq_len = ATH6KL_KEY_SEQ_LEN;
- }
- if (params->key_len > WLAN_MAX_KEY_LEN ||
- seq_len > sizeof(key->seq))
- return -EINVAL;
-
- key->key_len = params->key_len;
- memcpy(key->key, params->key, key->key_len);
- key->seq_len = seq_len;
- memcpy(key->seq, params->seq, key->seq_len);
- key->cipher = params->cipher;
+ seq_len = params->seq_len;
+ if (params->cipher == WLAN_CIPHER_SUITE_SMS4 &&
+ seq_len > ATH6KL_KEY_SEQ_LEN) {
+ /* Only first half of the WPI PN is configured */
+ seq_len = ATH6KL_KEY_SEQ_LEN;
}
+ if (params->key_len > WLAN_MAX_KEY_LEN ||
+ seq_len > sizeof(key->seq))
+ return -EINVAL;
+
+ key->key_len = params->key_len;
+ memcpy(key->key, params->key, key->key_len);
+ key->seq_len = seq_len;
+ memcpy(key->seq, params->seq, key->seq_len);
+ key->cipher = params->cipher;
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
@@ -1051,9 +1074,9 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
return -ENOTSUPP;
}
- if (((vif->auth_mode == WPA_PSK_AUTH)
- || (vif->auth_mode == WPA2_PSK_AUTH))
- && (key_usage & GROUP_USAGE))
+ if (((vif->auth_mode == WPA_PSK_AUTH) ||
+ (vif->auth_mode == WPA2_PSK_AUTH)) &&
+ (key_usage & GROUP_USAGE))
del_timer(&vif->disconnect_timer);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
@@ -1063,7 +1086,7 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
if (vif->nw_type == AP_NETWORK && !pairwise &&
(key_type == TKIP_CRYPT || key_type == AES_CRYPT ||
- key_type == WAPI_CRYPT) && params) {
+ key_type == WAPI_CRYPT)) {
ar->ap_mode_bkey.valid = true;
ar->ap_mode_bkey.key_index = key_index;
ar->ap_mode_bkey.key_type = key_type;
@@ -1115,7 +1138,7 @@ static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
if (!ath6kl_cfg80211_ready(vif))
return -EIO;
- if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ if (key_index > WMI_MAX_KEY_INDEX) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: key index %d out of bounds\n", __func__,
key_index);
@@ -1148,7 +1171,7 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
if (!ath6kl_cfg80211_ready(vif))
return -EIO;
- if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ if (key_index > WMI_MAX_KEY_INDEX) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: key index %d out of bounds\n", __func__,
key_index);
@@ -1184,7 +1207,7 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
if (!ath6kl_cfg80211_ready(vif))
return -EIO;
- if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ if (key_index > WMI_MAX_KEY_INDEX) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: key index %d out of bounds\n",
__func__, key_index);
@@ -1268,7 +1291,6 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
struct ath6kl_vif *vif;
- u8 ath6kl_dbm;
int dbm = MBM_TO_DBM(mbm);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__,
@@ -1285,7 +1307,7 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
case NL80211_TX_POWER_AUTOMATIC:
return 0;
case NL80211_TX_POWER_LIMITED:
- ar->tx_pwr = ath6kl_dbm = dbm;
+ ar->tx_pwr = dbm;
break;
default:
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x not supported\n",
@@ -1293,7 +1315,7 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
return -EOPNOTSUPP;
}
- ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_dbm);
+ ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx, dbm);
return 0;
}
@@ -1354,7 +1376,7 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
}
if (ath6kl_wmi_powermode_cmd(ar->wmi, vif->fw_vif_idx,
- mode.pwr_mode) != 0) {
+ mode.pwr_mode) != 0) {
ath6kl_err("wmi_powermode_cmd failed\n");
return -EIO;
}
@@ -1403,7 +1425,7 @@ static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
- ath6kl_deinit_if_data(vif);
+ ath6kl_cfg80211_vif_cleanup(vif);
return 0;
}
@@ -1728,29 +1750,14 @@ static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
return 0;
}
-static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif,
+ struct cfg80211_wowlan *wow, u32 *filter)
{
- struct ath6kl_vif *vif;
- int ret, pos, left;
- u32 filter = 0;
- u16 i;
+ int ret, pos;
u8 mask[WOW_MASK_SIZE];
+ u16 i;
- vif = ath6kl_vif_first(ar);
- if (!vif)
- return -EIO;
-
- if (!ath6kl_cfg80211_ready(vif))
- return -EIO;
-
- if (!test_bit(CONNECTED, &vif->flags))
- return -EINVAL;
-
- /* Clear existing WOW patterns */
- for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++)
- ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx,
- WOW_LIST_ID, i);
- /* Configure new WOW patterns */
+ /* Configure the patterns that we received from the user. */
for (i = 0; i < wow->n_patterns; i++) {
/*
@@ -1773,29 +1780,249 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
* matched from the first byte of received pkt in the firmware.
*/
ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
- vif->fw_vif_idx, WOW_LIST_ID,
- wow->patterns[i].pattern_len,
- 0 /* pattern offset */,
- wow->patterns[i].pattern, mask);
+ vif->fw_vif_idx, WOW_LIST_ID,
+ wow->patterns[i].pattern_len,
+ 0 /* pattern offset */,
+ wow->patterns[i].pattern, mask);
if (ret)
return ret;
}
if (wow->disconnect)
- filter |= WOW_FILTER_OPTION_NWK_DISASSOC;
+ *filter |= WOW_FILTER_OPTION_NWK_DISASSOC;
if (wow->magic_pkt)
- filter |= WOW_FILTER_OPTION_MAGIC_PACKET;
+ *filter |= WOW_FILTER_OPTION_MAGIC_PACKET;
if (wow->gtk_rekey_failure)
- filter |= WOW_FILTER_OPTION_GTK_ERROR;
+ *filter |= WOW_FILTER_OPTION_GTK_ERROR;
if (wow->eap_identity_req)
- filter |= WOW_FILTER_OPTION_EAP_REQ;
+ *filter |= WOW_FILTER_OPTION_EAP_REQ;
if (wow->four_way_handshake)
- filter |= WOW_FILTER_OPTION_8021X_4WAYHS;
+ *filter |= WOW_FILTER_OPTION_8021X_4WAYHS;
+
+ return 0;
+}
+
+static int ath6kl_wow_ap(struct ath6kl *ar, struct ath6kl_vif *vif)
+{
+ static const u8 unicst_pattern[] = { 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08 };
+ static const u8 unicst_mask[] = { 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x7f };
+ u8 unicst_offset = 0;
+ static const u8 arp_pattern[] = { 0x08, 0x06 };
+ static const u8 arp_mask[] = { 0xff, 0xff };
+ u8 arp_offset = 20;
+ static const u8 discvr_pattern[] = { 0xe0, 0x00, 0x00, 0xf8 };
+ static const u8 discvr_mask[] = { 0xf0, 0x00, 0x00, 0xf8 };
+ u8 discvr_offset = 38;
+ static const u8 dhcp_pattern[] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x43 /* port 67 */ };
+ static const u8 dhcp_mask[] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff /* port 67 */ };
+ u8 dhcp_offset = 0;
+ int ret;
+
+ /* Setup unicast IP, EAPOL-like and ARP pkt pattern */
+ ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+ vif->fw_vif_idx, WOW_LIST_ID,
+ sizeof(unicst_pattern), unicst_offset,
+ unicst_pattern, unicst_mask);
+ if (ret) {
+ ath6kl_err("failed to add WOW unicast IP pattern\n");
+ return ret;
+ }
+
+ /* Setup all ARP pkt pattern */
+ ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+ vif->fw_vif_idx, WOW_LIST_ID,
+ sizeof(arp_pattern), arp_offset,
+ arp_pattern, arp_mask);
+ if (ret) {
+ ath6kl_err("failed to add WOW ARP pattern\n");
+ return ret;
+ }
+
+ /*
+ * Setup multicast pattern for mDNS 224.0.0.251,
+ * SSDP 239.255.255.250 and LLMNR 224.0.0.252
+ */
+ ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+ vif->fw_vif_idx, WOW_LIST_ID,
+ sizeof(discvr_pattern), discvr_offset,
+ discvr_pattern, discvr_mask);
+ if (ret) {
+ ath6kl_err("failed to add WOW mDNS/SSDP/LLMNR pattern\n");
+ return ret;
+ }
+
+ /* Setup all DHCP broadcast pkt pattern */
+ ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+ vif->fw_vif_idx, WOW_LIST_ID,
+ sizeof(dhcp_pattern), dhcp_offset,
+ dhcp_pattern, dhcp_mask);
+ if (ret) {
+ ath6kl_err("failed to add WOW DHCP broadcast pattern\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif)
+{
+ struct net_device *ndev = vif->ndev;
+ static const u8 discvr_pattern[] = { 0xe0, 0x00, 0x00, 0xf8 };
+ static const u8 discvr_mask[] = { 0xf0, 0x00, 0x00, 0xf8 };
+ u8 discvr_offset = 38;
+ u8 mac_mask[ETH_ALEN];
+ int ret;
+
+ /* Setup unicast pkt pattern */
+ memset(mac_mask, 0xff, ETH_ALEN);
+ ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+ vif->fw_vif_idx, WOW_LIST_ID,
+ ETH_ALEN, 0, ndev->dev_addr,
+ mac_mask);
+ if (ret) {
+ ath6kl_err("failed to add WOW unicast pattern\n");
+ return ret;
+ }
+
+ /*
+ * Setup multicast pattern for mDNS 224.0.0.251,
+ * SSDP 239.255.255.250 and LLMNR 224.0.0.252
+ */
+ if ((ndev->flags & IFF_ALLMULTI) ||
+ (ndev->flags & IFF_MULTICAST && netdev_mc_count(ndev) > 0)) {
+ ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+ vif->fw_vif_idx, WOW_LIST_ID,
+ sizeof(discvr_pattern), discvr_offset,
+ discvr_pattern, discvr_mask);
+ if (ret) {
+ ath6kl_err("failed to add WOW mDNS/SSDP/LLMNR "
+ "pattern\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+{
+ struct in_device *in_dev;
+ struct in_ifaddr *ifa;
+ struct ath6kl_vif *vif;
+ int ret, left;
+ u32 filter = 0;
+ u16 i, bmiss_time;
+ u8 index = 0;
+ __be32 ips[MAX_IP_ADDRS];
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ if (!test_bit(CONNECTED, &vif->flags))
+ return -ENOTCONN;
+
+ if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST))
+ return -EINVAL;
+
+ /* Clear existing WOW patterns */
+ for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++)
+ ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx,
+ WOW_LIST_ID, i);
+
+ /*
+ * Skip the default WOW pattern configuration
+ * if the driver receives any WOW patterns from
+ * the user.
+ */
+ if (wow)
+ ret = ath6kl_wow_usr(ar, vif, wow, &filter);
+ else if (vif->nw_type == AP_NETWORK)
+ ret = ath6kl_wow_ap(ar, vif);
+ else
+ ret = ath6kl_wow_sta(ar, vif);
+
+ if (ret)
+ return ret;
+
+ netif_stop_queue(vif->ndev);
+
+ if (vif->nw_type != AP_NETWORK) {
+ ret = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_MAX_WOW_LISTEN_INTL,
+ 0);
+ if (ret)
+ return ret;
+
+ /* Use 15 times the listen interval as the bmiss time */
+ bmiss_time = ATH6KL_MAX_WOW_LISTEN_INTL * 15;
+ if (bmiss_time > ATH6KL_MAX_BMISS_TIME)
+ bmiss_time = ATH6KL_MAX_BMISS_TIME;
+
+ ret = ath6kl_wmi_bmisstime_cmd(ar->wmi, vif->fw_vif_idx,
+ bmiss_time, 0);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
+ 0xFFFF, 0, 0xFFFF, 0, 0, 0,
+ 0, 0, 0, 0);
+ if (ret)
+ return ret;
+ }
+
+ ar->state = ATH6KL_STATE_SUSPENDING;
+
+ /* Setup own IP addr for ARP agent. */
+ in_dev = __in_dev_get_rtnl(vif->ndev);
+ if (!in_dev)
+ goto skip_arp;
+
+ ifa = in_dev->ifa_list;
+ memset(&ips, 0, sizeof(ips));
+
+ /* Configure IP addr only if IP address count < MAX_IP_ADDRS */
+ while (index < MAX_IP_ADDRS && ifa) {
+ ips[index] = ifa->ifa_local;
+ ifa = ifa->ifa_next;
+ index++;
+ }
+
+ if (ifa) {
+ ath6kl_err("total IP addr count exceeds fw limit\n");
+ return -EINVAL;
+ }
+
+ ret = ath6kl_wmi_set_ip_cmd(ar->wmi, vif->fw_vif_idx, ips[0], ips[1]);
+ if (ret) {
+ ath6kl_err("failed to set up IP for ARP agent\n");
+ return ret;
+ }
+
+skip_arp:
ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
ATH6KL_WOW_MODE_ENABLE,
filter,
@@ -1803,11 +2030,26 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (ret)
return ret;
+ clear_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);
+
ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
ATH6KL_HOST_MODE_ASLEEP);
if (ret)
return ret;
+ left = wait_event_interruptible_timeout(ar->event_wq,
+ test_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags),
+ WMI_TIMEOUT);
+ if (left == 0) {
+ ath6kl_warn("timeout, didn't get host sleep cmd "
+ "processed event\n");
+ ret = -ETIMEDOUT;
+ } else if (left < 0) {
+ ath6kl_warn("error while waiting for host sleep cmd "
+ "processed event %d\n", left);
+ ret = left;
+ }
+
if (ar->tx_pending[ar->ctrl_ep]) {
left = wait_event_interruptible_timeout(ar->event_wq,
ar->tx_pending[ar->ctrl_ep] == 0, WMI_TIMEOUT);
@@ -1832,15 +2074,46 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
if (!vif)
return -EIO;
+ ar->state = ATH6KL_STATE_RESUMING;
+
ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
ATH6KL_HOST_MODE_AWAKE);
- return ret;
+ if (ret) {
+ ath6kl_warn("Failed to configure host sleep mode for "
+ "wow resume: %d\n", ret);
+ ar->state = ATH6KL_STATE_WOW;
+ return ret;
+ }
+
+ if (vif->nw_type != AP_NETWORK) {
+ ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
+ 0, 0, 0, 0, 0, 0, 3, 0, 0, 0);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->listen_intvl_t, 0);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_bmisstime_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->bmiss_time_t, 0);
+ if (ret)
+ return ret;
+ }
+
+ ar->state = ATH6KL_STATE_ON;
+
+ netif_wake_queue(vif->ndev);
+
+ return 0;
}
int ath6kl_cfg80211_suspend(struct ath6kl *ar,
enum ath6kl_cfg_suspend_mode mode,
struct cfg80211_wowlan *wow)
{
+ enum ath6kl_state prev_state;
int ret;
switch (mode) {
@@ -1851,11 +2124,14 @@ int ath6kl_cfg80211_suspend(struct ath6kl *ar,
/* Flush all non control pkts in TX path */
ath6kl_tx_data_cleanup(ar);
+ prev_state = ar->state;
+
ret = ath6kl_wow_suspend(ar, wow);
if (ret) {
- ath6kl_err("wow suspend failed: %d\n", ret);
+ ar->state = prev_state;
return ret;
}
+
ar->state = ATH6KL_STATE_WOW;
break;
@@ -1911,6 +2187,7 @@ int ath6kl_cfg80211_suspend(struct ath6kl *ar,
return 0;
}
+EXPORT_SYMBOL(ath6kl_cfg80211_suspend);
int ath6kl_cfg80211_resume(struct ath6kl *ar)
{
@@ -1926,7 +2203,6 @@ int ath6kl_cfg80211_resume(struct ath6kl *ar)
return ret;
}
- ar->state = ATH6KL_STATE_ON;
break;
case ATH6KL_STATE_DEEPSLEEP:
@@ -1962,6 +2238,7 @@ int ath6kl_cfg80211_resume(struct ath6kl *ar)
return 0;
}
+EXPORT_SYMBOL(ath6kl_cfg80211_resume);
#ifdef CONFIG_PM
@@ -1999,6 +2276,9 @@ static int __ath6kl_cfg80211_resume(struct wiphy *wiphy)
*/
void ath6kl_check_wow_status(struct ath6kl *ar)
{
+ if (ar->state == ATH6KL_STATE_SUSPENDING)
+ return;
+
if (ar->state == ATH6KL_STATE_WOW)
ath6kl_cfg80211_resume(ar);
}
@@ -2014,7 +2294,18 @@ static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
- struct ath6kl_vif *vif = netdev_priv(dev);
+ struct ath6kl_vif *vif;
+
+ /*
+ * 'dev' could be NULL if a channel change is required for the hardware
+ * device itself, instead of a particular VIF.
+ *
+ * FIXME: To be handled properly when monitor mode is supported.
+ */
+ if (!dev)
+ return -EBUSY;
+
+ vif = netdev_priv(dev);
if (!ath6kl_cfg80211_ready(vif))
return -EIO;
@@ -2069,19 +2360,51 @@ static int ath6kl_set_ap_probe_resp_ies(struct ath6kl_vif *vif,
return ret;
}
-static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
- struct beacon_parameters *info, bool add)
+static int ath6kl_set_ies(struct ath6kl_vif *vif,
+ struct cfg80211_beacon_data *info)
+{
+ struct ath6kl *ar = vif->ar;
+ int res;
+
+ /* this also clears IE in fw if it's not set */
+ res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_BEACON,
+ info->beacon_ies,
+ info->beacon_ies_len);
+ if (res)
+ return res;
+
+ /* this also clears IE in fw if it's not set */
+ res = ath6kl_set_ap_probe_resp_ies(vif, info->proberesp_ies,
+ info->proberesp_ies_len);
+ if (res)
+ return res;
+
+ /* this also clears IE in fw if it's not set */
+ res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_ASSOC_RESP,
+ info->assocresp_ies,
+ info->assocresp_ies_len);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ap_settings *info)
{
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_vif *vif = netdev_priv(dev);
struct ieee80211_mgmt *mgmt;
+ bool hidden = false;
u8 *ies;
int ies_len;
struct wmi_connect_cmd p;
int res;
int i, ret;
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: add=%d\n", __func__, add);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s:\n", __func__);
if (!ath6kl_cfg80211_ready(vif))
return -EIO;
@@ -2089,31 +2412,7 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
if (vif->next_mode != AP_NETWORK)
return -EOPNOTSUPP;
- if (info->beacon_ies) {
- res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
- WMI_FRAME_BEACON,
- info->beacon_ies,
- info->beacon_ies_len);
- if (res)
- return res;
- }
- if (info->proberesp_ies) {
- res = ath6kl_set_ap_probe_resp_ies(vif, info->proberesp_ies,
- info->proberesp_ies_len);
- if (res)
- return res;
- }
- if (info->assocresp_ies) {
- res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
- WMI_FRAME_ASSOC_RESP,
- info->assocresp_ies,
- info->assocresp_ies_len);
- if (res)
- return res;
- }
-
- if (!add)
- return 0;
+ res = ath6kl_set_ies(vif, &info->beacon);
ar->ap_mode_bkey.valid = false;
@@ -2122,20 +2421,24 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
* info->dtim_period
*/
- if (info->head == NULL)
+ if (info->beacon.head == NULL)
return -EINVAL;
- mgmt = (struct ieee80211_mgmt *) info->head;
+ mgmt = (struct ieee80211_mgmt *) info->beacon.head;
ies = mgmt->u.beacon.variable;
- if (ies > info->head + info->head_len)
+ if (ies > info->beacon.head + info->beacon.head_len)
return -EINVAL;
- ies_len = info->head + info->head_len - ies;
+ ies_len = info->beacon.head + info->beacon.head_len - ies;
if (info->ssid == NULL)
return -EINVAL;
memcpy(vif->ssid, info->ssid, info->ssid_len);
vif->ssid_len = info->ssid_len;
if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE)
- return -EOPNOTSUPP; /* TODO */
+ hidden = true;
+
+ res = ath6kl_wmi_ap_hidden_ssid(ar->wmi, vif->fw_vif_idx, hidden);
+ if (res)
+ return res;
ret = ath6kl_set_auth_type(vif, info->auth_type);
if (ret)
@@ -2214,6 +2517,11 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
p.dot11_auth_mode = vif->dot11_auth_mode;
p.ch = cpu_to_le16(vif->next_chan);
+ /* Enable uAPSD support by default */
+ res = ath6kl_wmi_ap_set_apsd(ar->wmi, vif->fw_vif_idx, true);
+ if (res < 0)
+ return res;
+
if (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
p.nw_subtype = SUBTYPE_P2PGO;
} else {
@@ -2231,19 +2539,21 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
-static int ath6kl_add_beacon(struct wiphy *wiphy, struct net_device *dev,
- struct beacon_parameters *info)
+static int ath6kl_change_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_beacon_data *beacon)
{
- return ath6kl_ap_beacon(wiphy, dev, info, true);
-}
+ struct ath6kl_vif *vif = netdev_priv(dev);
-static int ath6kl_set_beacon(struct wiphy *wiphy, struct net_device *dev,
- struct beacon_parameters *info)
-{
- return ath6kl_ap_beacon(wiphy, dev, info, false);
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ if (vif->next_mode != AP_NETWORK)
+ return -EOPNOTSUPP;
+
+ return ath6kl_set_ies(vif, beacon);
}
-static int ath6kl_del_beacon(struct wiphy *wiphy, struct net_device *dev)
+static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
{
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_vif *vif = netdev_priv(dev);
@@ -2259,6 +2569,19 @@ static int ath6kl_del_beacon(struct wiphy *wiphy, struct net_device *dev)
return 0;
}
+static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ const u8 *addr = mac ? mac : bcast_addr;
+
+ return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, WMI_AP_DEAUTH,
+ addr, WLAN_REASON_PREV_AUTH_NOT_VALID);
+}
+
static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_parameters *params)
{
@@ -2354,6 +2677,76 @@ static int ath6kl_send_go_probe_resp(struct ath6kl_vif *vif,
return ret;
}
+static bool ath6kl_mgmt_powersave_ap(struct ath6kl_vif *vif,
+ u32 id,
+ u32 freq,
+ u32 wait,
+ const u8 *buf,
+ size_t len,
+ bool *more_data,
+ bool no_cck)
+{
+ struct ieee80211_mgmt *mgmt;
+ struct ath6kl_sta *conn;
+ bool is_psq_empty = false;
+ struct ath6kl_mgmt_buff *mgmt_buf;
+ size_t mgmt_buf_size;
+ struct ath6kl *ar = vif->ar;
+
+ mgmt = (struct ieee80211_mgmt *) buf;
+ if (is_multicast_ether_addr(mgmt->da))
+ return false;
+
+ conn = ath6kl_find_sta(vif, mgmt->da);
+ if (!conn)
+ return false;
+
+ if (conn->sta_flags & STA_PS_SLEEP) {
+ if (!(conn->sta_flags & STA_PS_POLLED)) {
+ /* Queue the frames if the STA is sleeping */
+ mgmt_buf_size = len + sizeof(struct ath6kl_mgmt_buff);
+ mgmt_buf = kmalloc(mgmt_buf_size, GFP_KERNEL);
+ if (!mgmt_buf)
+ return false;
+
+ INIT_LIST_HEAD(&mgmt_buf->list);
+ mgmt_buf->id = id;
+ mgmt_buf->freq = freq;
+ mgmt_buf->wait = wait;
+ mgmt_buf->len = len;
+ mgmt_buf->no_cck = no_cck;
+ memcpy(mgmt_buf->buf, buf, len);
+ spin_lock_bh(&conn->psq_lock);
+ is_psq_empty = skb_queue_empty(&conn->psq) &&
+ (conn->mgmt_psq_len == 0);
+ list_add_tail(&mgmt_buf->list, &conn->mgmt_psq);
+ conn->mgmt_psq_len++;
+ spin_unlock_bh(&conn->psq_lock);
+
+ /*
+ * If this is the first pkt getting queued
+ * for this STA, update the PVB for this
+ * STA.
+ */
+ if (is_psq_empty)
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
+ conn->aid, 1);
+ return true;
+ }
+
+ /*
+ * This tx is because of a PsPoll.
+ * Determine if MoreData bit has to be set.
+ */
+ spin_lock_bh(&conn->psq_lock);
+ if (!skb_queue_empty(&conn->psq) || (conn->mgmt_psq_len != 0))
+ *more_data = true;
+ spin_unlock_bh(&conn->psq_lock);
+ }
+
+ return false;
+}
+
static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
@@ -2365,6 +2758,7 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
struct ath6kl_vif *vif = netdev_priv(dev);
u32 id;
const struct ieee80211_mgmt *mgmt;
+ bool more_data, queued;
mgmt = (const struct ieee80211_mgmt *) buf;
if (buf + len >= mgmt->u.probe_resp.variable &&
@@ -2390,22 +2784,19 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
*cookie = id;
- if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
- ar->fw_capabilities)) {
- /*
- * If capable of doing P2P mgmt operations using
- * station interface, send additional information like
- * supported rates to advertise and xmit rates for
- * probe requests
- */
- return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id,
- chan->center_freq, wait,
- buf, len, no_cck);
- } else {
- return ath6kl_wmi_send_action_cmd(ar->wmi, vif->fw_vif_idx, id,
- chan->center_freq, wait,
- buf, len);
+ /* AP mode Power saving processing */
+ if (vif->nw_type == AP_NETWORK) {
+ queued = ath6kl_mgmt_powersave_ap(vif,
+ id, chan->center_freq,
+ wait, buf,
+ len, &more_data, no_cck);
+ if (queued)
+ return 0;
}
+
+ return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id,
+ chan->center_freq, wait,
+ buf, len, no_cck);
}
static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
@@ -2518,6 +2909,12 @@ ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
+ [NL80211_IFTYPE_AP] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
[NL80211_IFTYPE_P2P_CLIENT] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
@@ -2559,9 +2956,10 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
.resume = __ath6kl_cfg80211_resume,
#endif
.set_channel = ath6kl_set_channel,
- .add_beacon = ath6kl_add_beacon,
- .set_beacon = ath6kl_set_beacon,
- .del_beacon = ath6kl_del_beacon,
+ .start_ap = ath6kl_start_ap,
+ .change_beacon = ath6kl_change_beacon,
+ .stop_ap = ath6kl_stop_ap,
+ .del_station = ath6kl_del_station,
.change_station = ath6kl_change_station,
.remain_on_channel = ath6kl_remain_on_channel,
.cancel_remain_on_channel = ath6kl_cancel_remain_on_channel,
@@ -2629,122 +3027,9 @@ void ath6kl_cfg80211_stop_all(struct ath6kl *ar)
ath6kl_cfg80211_stop(vif);
}
-struct ath6kl *ath6kl_core_alloc(struct device *dev)
-{
- struct ath6kl *ar;
- struct wiphy *wiphy;
- u8 ctr;
-
- /* create a new wiphy for use with cfg80211 */
- wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
-
- if (!wiphy) {
- ath6kl_err("couldn't allocate wiphy device\n");
- return NULL;
- }
-
- ar = wiphy_priv(wiphy);
- ar->p2p = !!ath6kl_p2p;
- ar->wiphy = wiphy;
- ar->dev = dev;
-
- ar->vif_max = 1;
-
- ar->max_norm_iface = 1;
-
- spin_lock_init(&ar->lock);
- spin_lock_init(&ar->mcastpsq_lock);
- spin_lock_init(&ar->list_lock);
-
- init_waitqueue_head(&ar->event_wq);
- sema_init(&ar->sem, 1);
-
- INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
- INIT_LIST_HEAD(&ar->vif_list);
-
- clear_bit(WMI_ENABLED, &ar->flag);
- clear_bit(SKIP_SCAN, &ar->flag);
- clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
-
- ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
- ar->listen_intvl_b = 0;
- ar->tx_pwr = 0;
-
- ar->intra_bss = 1;
- ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
-
- ar->state = ATH6KL_STATE_OFF;
-
- memset((u8 *)ar->sta_list, 0,
- AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
-
- /* Init the PS queues */
- for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
- spin_lock_init(&ar->sta_list[ctr].psq_lock);
- skb_queue_head_init(&ar->sta_list[ctr].psq);
- }
-
- skb_queue_head_init(&ar->mcastpsq);
-
- memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
-
- return ar;
-}
-
-int ath6kl_register_ieee80211_hw(struct ath6kl *ar)
-{
- struct wiphy *wiphy = ar->wiphy;
- int ret;
-
- wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
-
- wiphy->max_remain_on_channel_duration = 5000;
-
- /* set device pointer for wiphy */
- set_wiphy_dev(wiphy, ar->dev);
-
- wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_AP);
- if (ar->p2p) {
- wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_CLIENT);
- }
-
- /* max num of ssids that can be probed during scanning */
- wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
- wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
- wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
- wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
- wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
-
- wiphy->cipher_suites = cipher_suites;
- wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
-
- wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
- WIPHY_WOWLAN_DISCONNECT |
- WIPHY_WOWLAN_GTK_REKEY_FAILURE |
- WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
- WIPHY_WOWLAN_EAP_IDENTITY_REQ |
- WIPHY_WOWLAN_4WAY_HANDSHAKE;
- wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
- wiphy->wowlan.pattern_min_len = 1;
- wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
-
- wiphy->max_sched_scan_ssids = 10;
-
- ret = wiphy_register(wiphy);
- if (ret < 0) {
- ath6kl_err("couldn't register wiphy device\n");
- return ret;
- }
-
- return 0;
-}
-
-static int ath6kl_init_if_data(struct ath6kl_vif *vif)
+static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif)
{
- vif->aggr_cntxt = aggr_init(vif->ndev);
+ vif->aggr_cntxt = aggr_init(vif);
if (!vif->aggr_cntxt) {
ath6kl_err("failed to initialize aggr\n");
return -ENOMEM;
@@ -2758,12 +3043,15 @@ static int ath6kl_init_if_data(struct ath6kl_vif *vif)
set_bit(WMM_ENABLED, &vif->flags);
spin_lock_init(&vif->if_lock);
+ INIT_LIST_HEAD(&vif->mc_filter);
+
return 0;
}
-void ath6kl_deinit_if_data(struct ath6kl_vif *vif)
+void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
{
struct ath6kl *ar = vif->ar;
+ struct ath6kl_mc_filter *mc_filter, *tmp;
aggr_module_destroy(vif->aggr_cntxt);
@@ -2772,6 +3060,11 @@ void ath6kl_deinit_if_data(struct ath6kl_vif *vif)
if (vif->nw_type == ADHOC_NETWORK)
ar->ibss_if_active = false;
+ list_for_each_entry_safe(mc_filter, tmp, &vif->mc_filter, list) {
+ list_del(&mc_filter->list);
+ kfree(mc_filter);
+ }
+
unregister_netdevice(vif->ndev);
ar->num_vif--;
@@ -2797,7 +3090,10 @@ struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
vif->wdev.netdev = ndev;
vif->wdev.iftype = type;
vif->fw_vif_idx = fw_vif_idx;
- vif->nw_type = vif->next_mode = nw_type;
+ vif->nw_type = nw_type;
+ vif->next_mode = nw_type;
+ vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
+ vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
if (fw_vif_idx != 0)
@@ -2808,8 +3104,7 @@ struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
ath6kl_init_control_info(vif);
- /* TODO: Pass interface specific pointer instead of ar */
- if (ath6kl_init_if_data(vif))
+ if (ath6kl_cfg80211_vif_init(vif))
goto err;
if (register_netdevice(ndev))
@@ -2836,8 +3131,107 @@ err:
return NULL;
}
-void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar)
+int ath6kl_cfg80211_init(struct ath6kl *ar)
+{
+ struct wiphy *wiphy = ar->wiphy;
+ int ret;
+
+ wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
+
+ wiphy->max_remain_on_channel_duration = 5000;
+
+ /* set device pointer for wiphy */
+ set_wiphy_dev(wiphy, ar->dev);
+
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
+ if (ar->p2p) {
+ wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT);
+ }
+
+ /* max num of ssids that can be probed during scanning */
+ wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
+ wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
+ wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+ wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wiphy->cipher_suites = cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_4WAY_HANDSHAKE;
+ wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
+ wiphy->wowlan.pattern_min_len = 1;
+ wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
+
+ wiphy->max_sched_scan_ssids = 10;
+
+ ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
+ WIPHY_FLAG_HAVE_AP_SME |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
+ ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+
+ ar->wiphy->probe_resp_offload =
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
+
+ ret = wiphy_register(wiphy);
+ if (ret < 0) {
+ ath6kl_err("couldn't register wiphy device\n");
+ return ret;
+ }
+
+ ar->wiphy_registered = true;
+
+ return 0;
+}
+
+void ath6kl_cfg80211_cleanup(struct ath6kl *ar)
{
wiphy_unregister(ar->wiphy);
+
+ ar->wiphy_registered = false;
+}
+
+struct ath6kl *ath6kl_cfg80211_create(void)
+{
+ struct ath6kl *ar;
+ struct wiphy *wiphy;
+
+ /* create a new wiphy for use with cfg80211 */
+ wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
+
+ if (!wiphy) {
+ ath6kl_err("couldn't allocate wiphy device\n");
+ return NULL;
+ }
+
+ ar = wiphy_priv(wiphy);
+ ar->wiphy = wiphy;
+
+ return ar;
+}
+
+/* Note: ar variable must not be accessed after calling this! */
+void ath6kl_cfg80211_destroy(struct ath6kl *ar)
+{
+ int i;
+
+ for (i = 0; i < AP_MAX_NUM_STA; i++)
+ kfree(ar->sta_list[i].aggr_conn);
+
wiphy_free(ar->wiphy);
}
+
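The WOW hunks above install fixed byte patterns with per-byte masks (unicast, ARP, mDNS/SSDP/LLMNR, DHCP) through ath6kl_wmi_add_wow_pattern_cmd(). A minimal sketch of the match rule those pattern/mask/offset triples imply, assuming the firmware compares each masked byte of the received frame at the given offset (the helper name is illustrative, not part of the driver):

static bool wow_pattern_matches(const u8 *pkt, size_t pkt_len,
				const u8 *pattern, const u8 *mask,
				size_t pattern_len, size_t offset)
{
	size_t i;

	/* The pattern must fit inside the received frame. */
	if (offset + pattern_len > pkt_len)
		return false;

	/* Every byte must equal the pattern under its per-byte mask. */
	for (i = 0; i < pattern_len; i++) {
		if ((pkt[offset + i] & mask[i]) != (pattern[i] & mask[i]))
			return false;
	}

	return true;
}

Applied to discvr_pattern/discvr_mask from ath6kl_wow_sta(), this rule accepts any byte matching 0xe0 under 0xf0 in the first position and 0xf8 under 0xf8 in the last, which covers the addresses named in the comment: mDNS 224.0.0.251, SSDP 239.255.255.250 and LLMNR 224.0.0.252.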
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index 81f20a572315..c5def436417f 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -27,10 +28,6 @@ enum ath6kl_cfg_suspend_mode {
struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
enum nl80211_iftype type,
u8 fw_vif_idx, u8 nw_type);
-int ath6kl_register_ieee80211_hw(struct ath6kl *ar);
-struct ath6kl *ath6kl_core_alloc(struct device *dev);
-void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar);
-
void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted);
void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
@@ -53,7 +50,15 @@ int ath6kl_cfg80211_suspend(struct ath6kl *ar,
int ath6kl_cfg80211_resume(struct ath6kl *ar);
+void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif);
+
void ath6kl_cfg80211_stop(struct ath6kl_vif *vif);
void ath6kl_cfg80211_stop_all(struct ath6kl *ar);
+int ath6kl_cfg80211_init(struct ath6kl *ar);
+void ath6kl_cfg80211_cleanup(struct ath6kl *ar);
+
+struct ath6kl *ath6kl_cfg80211_create(void);
+void ath6kl_cfg80211_destroy(struct ath6kl *ar);
+
#endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index bfd6597763da..a60e78c0472f 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -79,8 +80,5 @@ struct ath6kl;
enum htc_credit_dist_reason;
struct ath6kl_htc_credit_info;
-struct ath6kl *ath6kl_core_alloc(struct device *sdev);
-int ath6kl_core_init(struct ath6kl *ar);
-void ath6kl_core_cleanup(struct ath6kl *ar);
struct sk_buff *ath6kl_buf_alloc(int size);
#endif /* COMMON_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
new file mode 100644
index 000000000000..45e641f3a41b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/export.h>
+
+#include "debug.h"
+#include "hif-ops.h"
+#include "cfg80211.h"
+
+unsigned int debug_mask;
+static unsigned int suspend_mode;
+static unsigned int wow_mode;
+static unsigned int uart_debug;
+static unsigned int ath6kl_p2p;
+static unsigned int testmode;
+
+module_param(debug_mask, uint, 0644);
+module_param(suspend_mode, uint, 0644);
+module_param(wow_mode, uint, 0644);
+module_param(uart_debug, uint, 0644);
+module_param(ath6kl_p2p, uint, 0644);
+module_param(testmode, uint, 0644);
+
+int ath6kl_core_init(struct ath6kl *ar)
+{
+ struct ath6kl_bmi_target_info targ_info;
+ struct net_device *ndev;
+ int ret = 0, i;
+
+ ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
+ if (!ar->ath6kl_wq)
+ return -ENOMEM;
+
+ ret = ath6kl_bmi_init(ar);
+ if (ret)
+ goto err_wq;
+
+ /*
+ * Turn on power to get hardware (target) version and leave power
+ * on deliberately as we will boot the hardware anyway within a few
+ * seconds.
+ */
+ ret = ath6kl_hif_power_on(ar);
+ if (ret)
+ goto err_bmi_cleanup;
+
+ ret = ath6kl_bmi_get_target_info(ar, &targ_info);
+ if (ret)
+ goto err_power_off;
+
+ ar->version.target_ver = le32_to_cpu(targ_info.version);
+ ar->target_type = le32_to_cpu(targ_info.type);
+ ar->wiphy->hw_version = le32_to_cpu(targ_info.version);
+
+ ret = ath6kl_init_hw_params(ar);
+ if (ret)
+ goto err_power_off;
+
+ ar->htc_target = ath6kl_htc_create(ar);
+
+ if (!ar->htc_target) {
+ ret = -ENOMEM;
+ goto err_power_off;
+ }
+
+ ar->testmode = testmode;
+
+ ret = ath6kl_init_fetch_firmwares(ar);
+ if (ret)
+ goto err_htc_cleanup;
+
+ /* FIXME: we should free all firmwares in the error cases below */
+
+ /* Indicate that WMI is enabled (although not ready yet) */
+ set_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = ath6kl_wmi_init(ar);
+ if (!ar->wmi) {
+ ath6kl_err("failed to initialize wmi\n");
+ ret = -EIO;
+ goto err_htc_cleanup;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
+
+ /* setup access class priority mappings */
+ ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
+ ar->ac_stream_pri_map[WMM_AC_BE] = 1;
+ ar->ac_stream_pri_map[WMM_AC_VI] = 2;
+ ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
+
+ /* allocate some buffers that handle larger AMSDU frames */
+ ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
+
+ ath6kl_cookie_init(ar);
+
+ ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
+ ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
+
+ if (suspend_mode &&
+ suspend_mode >= WLAN_POWER_STATE_CUT_PWR &&
+ suspend_mode <= WLAN_POWER_STATE_WOW)
+ ar->suspend_mode = suspend_mode;
+ else
+ ar->suspend_mode = 0;
+
+ if (suspend_mode == WLAN_POWER_STATE_WOW &&
+ (wow_mode == WLAN_POWER_STATE_CUT_PWR ||
+ wow_mode == WLAN_POWER_STATE_DEEP_SLEEP))
+ ar->wow_suspend_mode = wow_mode;
+ else
+ ar->wow_suspend_mode = 0;
+
+ if (uart_debug)
+ ar->conf_flags |= ATH6KL_CONF_UART_DEBUG;
+
+ set_bit(FIRST_BOOT, &ar->flag);
+
+ ath6kl_debug_init(ar);
+
+ ret = ath6kl_init_hw_start(ar);
+ if (ret) {
+ ath6kl_err("Failed to start hardware: %d\n", ret);
+ goto err_rxbuf_cleanup;
+ }
+
+ /* give our connected endpoints some buffers */
+ ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
+ ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
+
+ ret = ath6kl_cfg80211_init(ar);
+ if (ret)
+ goto err_rxbuf_cleanup;
+
+ ret = ath6kl_debug_init_fs(ar);
+ if (ret) {
+ wiphy_unregister(ar->wiphy);
+ goto err_rxbuf_cleanup;
+ }
+
+ for (i = 0; i < ar->vif_max; i++)
+ ar->avail_idx_map |= BIT(i);
+
+ rtnl_lock();
+
+ /* Add an initial station interface */
+ ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
+ INFRA_NETWORK);
+
+ rtnl_unlock();
+
+ if (!ndev) {
+ ath6kl_err("Failed to instantiate a network device\n");
+ ret = -ENOMEM;
+ wiphy_unregister(ar->wiphy);
+ goto err_rxbuf_cleanup;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
+ __func__, ndev->name, ndev, ar);
+
+ return ret;
+
+err_rxbuf_cleanup:
+ ath6kl_debug_cleanup(ar);
+ ath6kl_htc_flush_rx_buf(ar->htc_target);
+ ath6kl_cleanup_amsdu_rxbufs(ar);
+ ath6kl_wmi_shutdown(ar->wmi);
+ clear_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = NULL;
+err_htc_cleanup:
+ ath6kl_htc_cleanup(ar->htc_target);
+err_power_off:
+ ath6kl_hif_power_off(ar);
+err_bmi_cleanup:
+ ath6kl_bmi_cleanup(ar);
+err_wq:
+ destroy_workqueue(ar->ath6kl_wq);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath6kl_core_init);
+
+struct ath6kl *ath6kl_core_create(struct device *dev)
+{
+ struct ath6kl *ar;
+ u8 ctr;
+
+ ar = ath6kl_cfg80211_create();
+ if (!ar)
+ return NULL;
+
+ ar->p2p = !!ath6kl_p2p;
+ ar->dev = dev;
+
+ ar->vif_max = 1;
+
+ ar->max_norm_iface = 1;
+
+ spin_lock_init(&ar->lock);
+ spin_lock_init(&ar->mcastpsq_lock);
+ spin_lock_init(&ar->list_lock);
+
+ init_waitqueue_head(&ar->event_wq);
+ sema_init(&ar->sem, 1);
+
+ INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
+ INIT_LIST_HEAD(&ar->vif_list);
+
+ clear_bit(WMI_ENABLED, &ar->flag);
+ clear_bit(SKIP_SCAN, &ar->flag);
+ clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
+
+ ar->tx_pwr = 0;
+ ar->intra_bss = 1;
+ ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
+
+ ar->state = ATH6KL_STATE_OFF;
+
+ memset((u8 *)ar->sta_list, 0,
+ AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
+
+ /* Init the PS queues */
+ for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
+ spin_lock_init(&ar->sta_list[ctr].psq_lock);
+ skb_queue_head_init(&ar->sta_list[ctr].psq);
+ skb_queue_head_init(&ar->sta_list[ctr].apsdq);
+ ar->sta_list[ctr].mgmt_psq_len = 0;
+ INIT_LIST_HEAD(&ar->sta_list[ctr].mgmt_psq);
+ ar->sta_list[ctr].aggr_conn =
+ kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
+ if (!ar->sta_list[ctr].aggr_conn) {
+ ath6kl_err("Failed to allocate memory for sta aggregation information\n");
+ ath6kl_core_destroy(ar);
+ return NULL;
+ }
+ }
+
+ skb_queue_head_init(&ar->mcastpsq);
+
+ memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
+
+ return ar;
+}
+EXPORT_SYMBOL(ath6kl_core_create);
+
+void ath6kl_core_cleanup(struct ath6kl *ar)
+{
+ ath6kl_hif_power_off(ar);
+
+ destroy_workqueue(ar->ath6kl_wq);
+
+ if (ar->htc_target)
+ ath6kl_htc_cleanup(ar->htc_target);
+
+ ath6kl_cookie_cleanup(ar);
+
+ ath6kl_cleanup_amsdu_rxbufs(ar);
+
+ ath6kl_bmi_cleanup(ar);
+
+ ath6kl_debug_cleanup(ar);
+
+ kfree(ar->fw_board);
+ kfree(ar->fw_otp);
+ kfree(ar->fw);
+ kfree(ar->fw_patch);
+ kfree(ar->fw_testscript);
+
+ ath6kl_cfg80211_cleanup(ar);
+}
+EXPORT_SYMBOL(ath6kl_core_cleanup);
+
+void ath6kl_core_destroy(struct ath6kl *ar)
+{
+ ath6kl_cfg80211_destroy(ar);
+}
+EXPORT_SYMBOL(ath6kl_core_destroy);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Core module for AR600x SDIO and USB devices.");
+MODULE_LICENSE("Dual BSD/GPL");
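The new core.c splits the driver lifecycle into ath6kl_core_create(), ath6kl_core_init(), ath6kl_core_cleanup() and ath6kl_core_destroy(), all exported for the bus modules. A hedged sketch of the order a bus glue layer would call them in (the probe/remove function names below are illustrative and not taken from the patch):

static int ath6kl_bus_probe_sketch(struct device *dev)
{
	struct ath6kl *ar;
	int ret;

	ar = ath6kl_core_create(dev);	/* allocates the wiphy and struct ath6kl */
	if (!ar)
		return -ENOMEM;

	/* bus-specific setup (hif_ops, mbox info, ...) would go here */

	ret = ath6kl_core_init(ar);	/* BMI, firmware fetch, cfg80211, netdev */
	if (ret) {
		ath6kl_core_destroy(ar);
		return ret;
	}

	return 0;
}

static void ath6kl_bus_remove_sketch(struct ath6kl *ar)
{
	ath6kl_core_cleanup(ar);	/* power off, free firmware, unregister wiphy */
	ath6kl_core_destroy(ar);	/* free per-STA aggr_conn and the wiphy */
}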
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index c863a28f2e0c..f1dd8906be45 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -44,6 +45,10 @@
#define ATH6KL_MAX_ENDPOINTS 4
#define MAX_NODE_NUM 15
+#define ATH6KL_APSD_ALL_FRAME 0xFFFF
+#define ATH6KL_APSD_NUM_OF_AC 0x4
+#define ATH6KL_APSD_FRAME_MASK 0xF
+
/* Extra bytes for htc header alignment */
#define ATH6KL_HTC_ALIGN_BYTES 3
@@ -55,8 +60,9 @@
#define MAX_DEFAULT_SEND_QUEUE_DEPTH (MAX_DEF_COOKIE_NUM / WMM_NUM_AC)
#define DISCON_TIMER_INTVAL 10000 /* in msec */
-#define A_DEFAULT_LISTEN_INTERVAL 100
-#define A_MAX_WOW_LISTEN_INTERVAL 1000
+
+/* Channel dwell time in fg scan */
+#define ATH6KL_FG_SCAN_INTERVAL 50 /* in ms */
/* includes also the null byte */
#define ATH6KL_FIRMWARE_MAGIC "QCA-ATH6KL"
@@ -97,45 +103,49 @@ struct ath6kl_fw_ie {
u8 data[0];
};
+#define ATH6KL_FW_API2_FILE "fw-2.bin"
+#define ATH6KL_FW_API3_FILE "fw-3.bin"
+
/* AR6003 1.0 definitions */
#define AR6003_HW_1_0_VERSION 0x300002ba
/* AR6003 2.0 definitions */
#define AR6003_HW_2_0_VERSION 0x30000384
#define AR6003_HW_2_0_PATCH_DOWNLOAD_ADDRESS 0x57e910
-#define AR6003_HW_2_0_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77"
-#define AR6003_HW_2_0_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77"
-#define AR6003_HW_2_0_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athtcmd_ram.bin"
-#define AR6003_HW_2_0_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin"
-#define AR6003_HW_2_0_FIRMWARE_2_FILE "ath6k/AR6003/hw2.0/fw-2.bin"
+#define AR6003_HW_2_0_FW_DIR "ath6k/AR6003/hw2.0"
+#define AR6003_HW_2_0_OTP_FILE "otp.bin.z77"
+#define AR6003_HW_2_0_FIRMWARE_FILE "athwlan.bin.z77"
+#define AR6003_HW_2_0_TCMD_FIRMWARE_FILE "athtcmd_ram.bin"
+#define AR6003_HW_2_0_PATCH_FILE "data.patch.bin"
#define AR6003_HW_2_0_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin"
#define AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE \
"ath6k/AR6003/hw2.0/bdata.SD31.bin"
/* AR6003 3.0 definitions */
#define AR6003_HW_2_1_1_VERSION 0x30000582
-#define AR6003_HW_2_1_1_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin"
-#define AR6003_HW_2_1_1_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin"
-#define AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE \
- "ath6k/AR6003/hw2.1.1/athtcmd_ram.bin"
-#define AR6003_HW_2_1_1_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin"
-#define AR6003_HW_2_1_1_FIRMWARE_2_FILE "ath6k/AR6003/hw2.1.1/fw-2.bin"
+#define AR6003_HW_2_1_1_FW_DIR "ath6k/AR6003/hw2.1.1"
+#define AR6003_HW_2_1_1_OTP_FILE "otp.bin"
+#define AR6003_HW_2_1_1_FIRMWARE_FILE "athwlan.bin"
+#define AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE "athtcmd_ram.bin"
+#define AR6003_HW_2_1_1_UTF_FIRMWARE_FILE "utf.bin"
+#define AR6003_HW_2_1_1_TESTSCRIPT_FILE "nullTestFlow.bin"
+#define AR6003_HW_2_1_1_PATCH_FILE "data.patch.bin"
#define AR6003_HW_2_1_1_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin"
#define AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE \
"ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
/* AR6004 1.0 definitions */
#define AR6004_HW_1_0_VERSION 0x30000623
-#define AR6004_HW_1_0_FIRMWARE_2_FILE "ath6k/AR6004/hw1.0/fw-2.bin"
-#define AR6004_HW_1_0_FIRMWARE_FILE "ath6k/AR6004/hw1.0/fw.ram.bin"
+#define AR6004_HW_1_0_FW_DIR "ath6k/AR6004/hw1.0"
+#define AR6004_HW_1_0_FIRMWARE_FILE "fw.ram.bin"
#define AR6004_HW_1_0_BOARD_DATA_FILE "ath6k/AR6004/hw1.0/bdata.bin"
#define AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE \
"ath6k/AR6004/hw1.0/bdata.DB132.bin"
/* AR6004 1.1 definitions */
#define AR6004_HW_1_1_VERSION 0x30000001
-#define AR6004_HW_1_1_FIRMWARE_2_FILE "ath6k/AR6004/hw1.1/fw-2.bin"
-#define AR6004_HW_1_1_FIRMWARE_FILE "ath6k/AR6004/hw1.1/fw.ram.bin"
+#define AR6004_HW_1_1_FW_DIR "ath6k/AR6004/hw1.1"
+#define AR6004_HW_1_1_FIRMWARE_FILE "fw.ram.bin"
#define AR6004_HW_1_1_BOARD_DATA_FILE "ath6k/AR6004/hw1.1/bdata.bin"
#define AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE \
"ath6k/AR6004/hw1.1/bdata.DB132.bin"
@@ -144,6 +154,8 @@ struct ath6kl_fw_ie {
#define STA_PS_AWAKE BIT(0)
#define STA_PS_SLEEP BIT(1)
#define STA_PS_POLLED BIT(2)
+#define STA_PS_APSD_TRIGGER BIT(3)
+#define STA_PS_APSD_EOSP BIT(4)
/* HTC TX packet tagging definitions */
#define ATH6KL_CONTROL_PKT_TAG HTC_TX_PACKET_TAG_USER_DEFINED
@@ -173,6 +185,11 @@ struct ath6kl_fw_ie {
#define MBOX_YIELD_LIMIT 99
+#define ATH6KL_DEFAULT_LISTEN_INTVAL 100 /* in TUs */
+#define ATH6KL_DEFAULT_BMISS_TIME 1500
+#define ATH6KL_MAX_WOW_LISTEN_INTL 300 /* in TUs */
+#define ATH6KL_MAX_BMISS_TIME 5000
+
/* configuration flags */
/*
* ATH6KL_CONF_IGNORE_ERP_BARKER: Ignore the barker premable in
@@ -186,7 +203,7 @@ struct ath6kl_fw_ie {
#define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1)
#define ATH6KL_CONF_ENABLE_11N BIT(2)
#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3)
-#define ATH6KL_CONF_SUSPEND_CUTPOWER BIT(4)
+#define ATH6KL_CONF_UART_DEBUG BIT(4)
enum wlan_low_pwr_state {
WLAN_POWER_STATE_ON,
@@ -216,6 +233,12 @@ struct rxtid {
u32 hold_q_sz;
struct skb_hold_q *hold_q;
struct sk_buff_head q;
+
+ /*
+ * FIXME: No clue what this should protect. Apparently it should
+ * protect some of the fields above but they are also accessed
+ * without taking the lock.
+ */
spinlock_t lock;
};
@@ -231,14 +254,19 @@ struct rxtid_stats {
u32 num_bar;
};
-struct aggr_info {
+struct aggr_info_conn {
u8 aggr_sz;
u8 timer_scheduled;
struct timer_list timer;
struct net_device *dev;
struct rxtid rx_tid[NUM_OF_TIDS];
- struct sk_buff_head free_q;
struct rxtid_stats stat[NUM_OF_TIDS];
+ struct aggr_info *aggr_info;
+};
+
+struct aggr_info {
+ struct aggr_info_conn *aggr_conn;
+ struct sk_buff_head rx_amsdu_freeq;
};
struct ath6kl_wep_key {
@@ -270,6 +298,16 @@ struct ath6kl_cookie {
struct ath6kl_cookie *arc_list_next;
};
+struct ath6kl_mgmt_buff {
+ struct list_head list;
+ u32 freq;
+ u32 wait;
+ u32 id;
+ bool no_cck;
+ size_t len;
+ u8 buf[0];
+};
+
struct ath6kl_sta {
u16 sta_flags;
u8 mac[ETH_ALEN];
@@ -279,7 +317,15 @@ struct ath6kl_sta {
u8 auth;
u8 wpa_ie[ATH6KL_MAX_IE];
struct sk_buff_head psq;
+
+ /* protects psq, mgmt_psq, apsdq, and mgmt_psq_len fields */
spinlock_t psq_lock;
+
+ struct list_head mgmt_psq;
+ size_t mgmt_psq_len;
+ u8 apsd_info;
+ struct sk_buff_head apsdq;
+ struct aggr_info_conn *aggr_conn;
};
struct ath6kl_version {
@@ -408,6 +454,13 @@ enum ath6kl_hif_type {
ATH6KL_HIF_TYPE_USB,
};
+/* Max number of filters that hw supports */
+#define ATH6K_MAX_MC_FILTERS_PER_LIST 7
+struct ath6kl_mc_filter {
+ struct list_head list;
+ char hw_addr[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE];
+};
+
/*
* Driver's maximum limit, note that some firmwares support only one vif
* and the runtime (current) limit must be checked from ar->vif_max.
@@ -426,6 +479,7 @@ enum ath6kl_vif_state {
DTIM_PERIOD_AVAIL,
WLAN_ENABLED,
STATS_UPDATE_PEND,
+ HOST_SLEEP_MODE_CMD_PROCESSED,
};
struct ath6kl_vif {
@@ -468,9 +522,13 @@ struct ath6kl_vif {
bool probe_req_report;
u16 next_chan;
u16 assoc_bss_beacon_int;
+ u16 listen_intvl_t;
+ u16 bmiss_time_t;
u8 assoc_bss_dtim_period;
struct net_device_stats net_stats;
struct target_stats target_stats;
+
+ struct list_head mc_filter;
};
#define WOW_LIST_ID 0
@@ -493,6 +551,8 @@ enum ath6kl_dev_state {
enum ath6kl_state {
ATH6KL_STATE_OFF,
ATH6KL_STATE_ON,
+ ATH6KL_STATE_SUSPENDING,
+ ATH6KL_STATE_RESUMING,
ATH6KL_STATE_DEEPSLEEP,
ATH6KL_STATE_CUTPOWER,
ATH6KL_STATE_WOW,
@@ -504,6 +564,7 @@ struct ath6kl {
struct wiphy *wiphy;
enum ath6kl_state state;
+ unsigned int testmode;
struct ath6kl_bmi bmi;
const struct ath6kl_hif_ops *hif_ops;
@@ -520,10 +581,14 @@ struct ath6kl {
unsigned int vif_max;
u8 max_norm_iface;
u8 avail_idx_map;
+
+ /*
+ * Protects at least amsdu_rx_buffer_queue, ath6kl_alloc_cookie()
+ * calls, tx_pending and total_tx_data_pend.
+ */
spinlock_t lock;
+
struct semaphore sem;
- u16 listen_intvl_b;
- u16 listen_intvl_t;
u8 lrssi_roam_threshold;
struct ath6kl_version version;
u32 target_type;
@@ -549,7 +614,13 @@ struct ath6kl {
u8 sta_list_index;
struct ath6kl_req_key ap_mode_bkey;
struct sk_buff_head mcastpsq;
+
+ /*
+ * FIXME: protects access to mcastpsq but is actually useless as
+ * all skb_queue_*() functions provide serialisation themselves
+ */
spinlock_t mcastpsq_lock;
+
u8 intra_bss;
struct wmi_ap_mode_stat ap_stats;
u8 ap_country_code[3];
@@ -574,17 +645,25 @@ struct ath6kl {
u32 board_addr;
u32 refclk_hz;
u32 uarttx_pin;
+ u32 testscript_addr;
+
+ struct ath6kl_hw_fw {
+ const char *dir;
+ const char *otp;
+ const char *fw;
+ const char *tcmd;
+ const char *patch;
+ const char *utf;
+ const char *testscript;
+ } fw;
- const char *fw_otp;
- const char *fw;
- const char *fw_tcmd;
- const char *fw_patch;
- const char *fw_api2;
const char *fw_board;
const char *fw_default_board;
} hw;
u16 conf_flags;
+ u16 suspend_mode;
+ u16 wow_suspend_mode;
wait_queue_head_t event_wq;
struct ath6kl_mbox_info mbox_info;
@@ -603,6 +682,10 @@ struct ath6kl {
u8 *fw_patch;
size_t fw_patch_len;
+ u8 *fw_testscript;
+ size_t fw_testscript_len;
+
+ unsigned int fw_api;
unsigned long fw_capabilities[ATH6KL_CAPABILITY_LEN];
struct workqueue_struct *ath6kl_wq;
@@ -611,12 +694,16 @@ struct ath6kl {
bool p2p;
+ bool wiphy_registered;
+
#ifdef CONFIG_ATH6KL_DEBUG
struct {
- struct circ_buf fwlog_buf;
- spinlock_t fwlog_lock;
- void *fwlog_tmp;
+ struct sk_buff_head fwlog_queue;
+ struct completion fwlog_completion;
+ bool fwlog_open;
+
u32 fwlog_mask;
+
unsigned int dbgfs_diag_reg;
u32 diag_reg_addr_wr;
u32 diag_reg_val_wr;
@@ -676,7 +763,9 @@ struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev);
-struct aggr_info *aggr_init(struct net_device *dev);
+struct aggr_info *aggr_init(struct ath6kl_vif *vif);
+void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
+ struct aggr_info_conn *aggr_conn);
void ath6kl_rx_refill(struct htc_target *target,
enum htc_endpoint_id endpoint);
void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count);
@@ -684,12 +773,12 @@ struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
enum htc_endpoint_id endpoint,
int len);
void aggr_module_destroy(struct aggr_info *aggr_info);
-void aggr_reset_state(struct aggr_info *aggr_info);
+void aggr_reset_state(struct aggr_info_conn *aggr_conn);
-struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 * node_addr);
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr);
struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
-void ath6kl_ready_event(void *devt, u8 * datap, u32 sw_ver, u32 abi_ver);
+void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver);
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
enum htc_endpoint_id eid);
void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel,
@@ -700,7 +789,7 @@ void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel,
void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel);
void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
u8 keymgmt, u8 ucipher, u8 auth,
- u8 assoc_req_len, u8 *assoc_info);
+ u8 assoc_req_len, u8 *assoc_info, u8 apsd_info);
void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason,
u8 *bssid, u8 assoc_resp_len,
u8 *assoc_info, u16 prot_reason_status);
@@ -723,12 +812,18 @@ void ath6kl_wakeup_event(void *dev);
void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
bool wait_fot_compltn, bool cold_reset);
void ath6kl_init_control_info(struct ath6kl_vif *vif);
-void ath6kl_deinit_if_data(struct ath6kl_vif *vif);
-void ath6kl_core_free(struct ath6kl *ar);
struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready);
int ath6kl_init_hw_start(struct ath6kl *ar);
int ath6kl_init_hw_stop(struct ath6kl *ar);
+int ath6kl_init_fetch_firmwares(struct ath6kl *ar);
+int ath6kl_init_hw_params(struct ath6kl *ar);
+
void ath6kl_check_wow_status(struct ath6kl *ar);
+struct ath6kl *ath6kl_core_create(struct device *dev);
+int ath6kl_core_init(struct ath6kl *ar);
+void ath6kl_core_cleanup(struct ath6kl *ar);
+void ath6kl_core_destroy(struct ath6kl *ar);
+
#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index eb808b46f94c..552adb3f80d0 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -16,7 +17,7 @@
#include "core.h"
-#include <linux/circ_buf.h>
+#include <linux/skbuff.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
@@ -32,9 +33,8 @@ struct ath6kl_fwlog_slot {
u8 payload[0];
};
-#define ATH6KL_FWLOG_SIZE 32768
-#define ATH6KL_FWLOG_SLOT_SIZE (sizeof(struct ath6kl_fwlog_slot) + \
- ATH6KL_FWLOG_PAYLOAD_SIZE)
+#define ATH6KL_FWLOG_MAX_ENTRIES 20
+
#define ATH6KL_FWLOG_VALID_MASK 0x1ffff
int ath6kl_printk(const char *level, const char *fmt, ...)
@@ -54,9 +54,42 @@ int ath6kl_printk(const char *level, const char *fmt, ...)
return rtn;
}
+EXPORT_SYMBOL(ath6kl_printk);
#ifdef CONFIG_ATH6KL_DEBUG
+void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (!(debug_mask & mask))
+ return;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ ath6kl_printk(KERN_DEBUG, "%pV", &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(ath6kl_dbg);
+
+void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+ if (debug_mask & mask) {
+ if (msg)
+ ath6kl_dbg(mask, "%s\n", msg);
+
+ print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
+ }
+}
+EXPORT_SYMBOL(ath6kl_dbg_dump);
+
#define REG_OUTPUT_LEN_PER_LINE 25
#define REGTYPE_STR_LEN 100
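The ath6kl_dbg() added above forwards its variable argument list to ath6kl_printk() through a struct va_format and printk's %pV specifier, so the format string is expanded exactly once inside printk. A minimal sketch of the same pattern outside the driver (my_dbg and the "mydrv" prefix are made-up names; only the %pV/va_format usage is the point):

#include <linux/kernel.h>
#include <linux/printk.h>

static unsigned int debug_mask;

static void my_dbg(unsigned int mask, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!(debug_mask & mask))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* %pV hands the saved format and arguments to printk in one pass */
	printk(KERN_DEBUG "mydrv: %pV", &vaf);
	va_end(args);
}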
@@ -82,63 +115,63 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
struct ath6kl_irq_enable_reg *irq_enable_reg)
{
- ath6kl_dbg(ATH6KL_DBG_ANY, ("<------- Register Table -------->\n"));
+ ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n"));
if (irq_proc_reg != NULL) {
- ath6kl_dbg(ATH6KL_DBG_ANY,
- "Host Int status: 0x%x\n",
- irq_proc_reg->host_int_status);
- ath6kl_dbg(ATH6KL_DBG_ANY,
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "Host Int status: 0x%x\n",
+ irq_proc_reg->host_int_status);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
"CPU Int status: 0x%x\n",
- irq_proc_reg->cpu_int_status);
- ath6kl_dbg(ATH6KL_DBG_ANY,
+ irq_proc_reg->cpu_int_status);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
"Error Int status: 0x%x\n",
- irq_proc_reg->error_int_status);
- ath6kl_dbg(ATH6KL_DBG_ANY,
+ irq_proc_reg->error_int_status);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
"Counter Int status: 0x%x\n",
- irq_proc_reg->counter_int_status);
- ath6kl_dbg(ATH6KL_DBG_ANY,
+ irq_proc_reg->counter_int_status);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
"Mbox Frame: 0x%x\n",
- irq_proc_reg->mbox_frame);
- ath6kl_dbg(ATH6KL_DBG_ANY,
+ irq_proc_reg->mbox_frame);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
"Rx Lookahead Valid: 0x%x\n",
- irq_proc_reg->rx_lkahd_valid);
- ath6kl_dbg(ATH6KL_DBG_ANY,
+ irq_proc_reg->rx_lkahd_valid);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
"Rx Lookahead 0: 0x%x\n",
- irq_proc_reg->rx_lkahd[0]);
- ath6kl_dbg(ATH6KL_DBG_ANY,
+ irq_proc_reg->rx_lkahd[0]);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
"Rx Lookahead 1: 0x%x\n",
- irq_proc_reg->rx_lkahd[1]);
+ irq_proc_reg->rx_lkahd[1]);
if (dev->ar->mbox_info.gmbox_addr != 0) {
/*
* If the target supports GMBOX hardware, dump some
* additional state.
*/
- ath6kl_dbg(ATH6KL_DBG_ANY,
- "GMBOX Host Int status 2: 0x%x\n",
- irq_proc_reg->host_int_status2);
- ath6kl_dbg(ATH6KL_DBG_ANY,
- "GMBOX RX Avail: 0x%x\n",
- irq_proc_reg->gmbox_rx_avail);
- ath6kl_dbg(ATH6KL_DBG_ANY,
- "GMBOX lookahead alias 0: 0x%x\n",
- irq_proc_reg->rx_gmbox_lkahd_alias[0]);
- ath6kl_dbg(ATH6KL_DBG_ANY,
- "GMBOX lookahead alias 1: 0x%x\n",
- irq_proc_reg->rx_gmbox_lkahd_alias[1]);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "GMBOX Host Int status 2: 0x%x\n",
+ irq_proc_reg->host_int_status2);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "GMBOX RX Avail: 0x%x\n",
+ irq_proc_reg->gmbox_rx_avail);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "GMBOX lookahead alias 0: 0x%x\n",
+ irq_proc_reg->rx_gmbox_lkahd_alias[0]);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "GMBOX lookahead alias 1: 0x%x\n",
+ irq_proc_reg->rx_gmbox_lkahd_alias[1]);
}
}
if (irq_enable_reg != NULL) {
- ath6kl_dbg(ATH6KL_DBG_ANY,
- "Int status Enable: 0x%x\n",
- irq_enable_reg->int_status_en);
- ath6kl_dbg(ATH6KL_DBG_ANY, "Counter Int status Enable: 0x%x\n",
- irq_enable_reg->cntr_int_status_en);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "Int status Enable: 0x%x\n",
+ irq_enable_reg->int_status_en);
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "Counter Int status Enable: 0x%x\n",
+ irq_enable_reg->cntr_int_status_en);
}
- ath6kl_dbg(ATH6KL_DBG_ANY, "<------------------------------->\n");
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "<------------------------------->\n");
}
static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist)
@@ -175,9 +208,6 @@ void dump_cred_dist_stats(struct htc_target *target)
{
struct htc_endpoint_credit_dist *ep_list;
- if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_CREDIT))
- return;
-
list_for_each_entry(ep_list, &target->cred_dist_list, list)
dump_cred_dist(ep_list);
@@ -238,105 +268,103 @@ static const struct file_operations fops_war_stats = {
.llseek = default_llseek,
};
-static void ath6kl_debug_fwlog_add(struct ath6kl *ar, const void *buf,
- size_t buf_len)
+void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len)
{
- struct circ_buf *fwlog = &ar->debug.fwlog_buf;
- size_t space;
- int i;
+ struct ath6kl_fwlog_slot *slot;
+ struct sk_buff *skb;
+ size_t slot_len;
- /* entries must all be equal size */
- if (WARN_ON(buf_len != ATH6KL_FWLOG_SLOT_SIZE))
+ if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE))
return;
- space = CIRC_SPACE(fwlog->head, fwlog->tail, ATH6KL_FWLOG_SIZE);
- if (space < buf_len)
- /* discard oldest slot */
- fwlog->tail = (fwlog->tail + ATH6KL_FWLOG_SLOT_SIZE) &
- (ATH6KL_FWLOG_SIZE - 1);
+ slot_len = sizeof(*slot) + ATH6KL_FWLOG_PAYLOAD_SIZE;
- for (i = 0; i < buf_len; i += space) {
- space = CIRC_SPACE_TO_END(fwlog->head, fwlog->tail,
- ATH6KL_FWLOG_SIZE);
+ skb = alloc_skb(slot_len, GFP_KERNEL);
+ if (!skb)
+ return;
- if ((size_t) space > buf_len - i)
- space = buf_len - i;
+ slot = (struct ath6kl_fwlog_slot *) skb_put(skb, slot_len);
+ slot->timestamp = cpu_to_le32(jiffies);
+ slot->length = cpu_to_le32(len);
+ memcpy(slot->payload, buf, len);
- memcpy(&fwlog->buf[fwlog->head], buf, space);
- fwlog->head = (fwlog->head + space) & (ATH6KL_FWLOG_SIZE - 1);
- }
+ /* Need to pad each record to fixed length ATH6KL_FWLOG_PAYLOAD_SIZE */
+ memset(slot->payload + len, 0, ATH6KL_FWLOG_PAYLOAD_SIZE - len);
-}
+ spin_lock(&ar->debug.fwlog_queue.lock);
-void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len)
-{
- struct ath6kl_fwlog_slot *slot = ar->debug.fwlog_tmp;
- size_t slot_len;
+ __skb_queue_tail(&ar->debug.fwlog_queue, skb);
+ complete(&ar->debug.fwlog_completion);
- if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE))
- return;
+ /* drop oldest entries */
+ while (skb_queue_len(&ar->debug.fwlog_queue) >
+ ATH6KL_FWLOG_MAX_ENTRIES) {
+ skb = __skb_dequeue(&ar->debug.fwlog_queue);
+ kfree_skb(skb);
+ }
- spin_lock_bh(&ar->debug.fwlog_lock);
+ spin_unlock(&ar->debug.fwlog_queue.lock);
- slot->timestamp = cpu_to_le32(jiffies);
- slot->length = cpu_to_le32(len);
- memcpy(slot->payload, buf, len);
+ return;
+}
- slot_len = sizeof(*slot) + len;
+static int ath6kl_fwlog_open(struct inode *inode, struct file *file)
+{
+ struct ath6kl *ar = inode->i_private;
- if (slot_len < ATH6KL_FWLOG_SLOT_SIZE)
- memset(slot->payload + len, 0,
- ATH6KL_FWLOG_SLOT_SIZE - slot_len);
+ if (ar->debug.fwlog_open)
+ return -EBUSY;
- ath6kl_debug_fwlog_add(ar, slot, ATH6KL_FWLOG_SLOT_SIZE);
+ ar->debug.fwlog_open = true;
- spin_unlock_bh(&ar->debug.fwlog_lock);
+ file->private_data = inode->i_private;
+ return 0;
}
-static bool ath6kl_debug_fwlog_empty(struct ath6kl *ar)
+static int ath6kl_fwlog_release(struct inode *inode, struct file *file)
{
- return CIRC_CNT(ar->debug.fwlog_buf.head,
- ar->debug.fwlog_buf.tail,
- ATH6KL_FWLOG_SLOT_SIZE) == 0;
+ struct ath6kl *ar = inode->i_private;
+
+ ar->debug.fwlog_open = false;
+
+ return 0;
}
static ssize_t ath6kl_fwlog_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
- struct circ_buf *fwlog = &ar->debug.fwlog_buf;
- size_t len = 0, buf_len = count;
+ struct sk_buff *skb;
ssize_t ret_cnt;
+ size_t len = 0;
char *buf;
- int ccnt;
- buf = vmalloc(buf_len);
+ buf = vmalloc(count);
if (!buf)
return -ENOMEM;
/* read undelivered logs from firmware */
ath6kl_read_fwlogs(ar);
- spin_lock_bh(&ar->debug.fwlog_lock);
+ spin_lock(&ar->debug.fwlog_queue.lock);
- while (len < buf_len && !ath6kl_debug_fwlog_empty(ar)) {
- ccnt = CIRC_CNT_TO_END(fwlog->head, fwlog->tail,
- ATH6KL_FWLOG_SIZE);
+ while ((skb = __skb_dequeue(&ar->debug.fwlog_queue))) {
+ if (skb->len > count - len) {
+ /* not enough space, put skb back and leave */
+ __skb_queue_head(&ar->debug.fwlog_queue, skb);
+ break;
+ }
- if ((size_t) ccnt > buf_len - len)
- ccnt = buf_len - len;
- memcpy(buf + len, &fwlog->buf[fwlog->tail], ccnt);
- len += ccnt;
+ memcpy(buf + len, skb->data, skb->len);
+ len += skb->len;
- fwlog->tail = (fwlog->tail + ccnt) &
- (ATH6KL_FWLOG_SIZE - 1);
+ kfree_skb(skb);
}
- spin_unlock_bh(&ar->debug.fwlog_lock);
+ spin_unlock(&ar->debug.fwlog_queue.lock);
- if (WARN_ON(len > buf_len))
- len = buf_len;
+ /* FIXME: what to do if len == 0? */
ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
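The rewrite above replaces the circular buffer with an sk_buff queue capped at ATH6KL_FWLOG_MAX_ENTRIES records. Stripped of the driver specifics, the enqueue side is a bounded FIFO along these lines (a sketch, not the patch's exact code):

#include <linux/skbuff.h>

/* Append one record; drop the oldest entries once the cap is exceeded. */
static void fwlog_enqueue(struct sk_buff_head *q, struct sk_buff *skb,
			  unsigned int max_entries)
{
	spin_lock(&q->lock);

	__skb_queue_tail(q, skb);

	while (skb_queue_len(q) > max_entries)
		kfree_skb(__skb_dequeue(q));

	spin_unlock(&q->lock);
}

Readers drain the same queue under q->lock, which is why the lockless __skb_* variants are used instead of the self-locking skb_queue_tail()/skb_dequeue().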
@@ -346,12 +374,87 @@ static ssize_t ath6kl_fwlog_read(struct file *file, char __user *user_buf,
}
static const struct file_operations fops_fwlog = {
- .open = ath6kl_debugfs_open,
+ .open = ath6kl_fwlog_open,
+ .release = ath6kl_fwlog_release,
.read = ath6kl_fwlog_read,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
+static ssize_t ath6kl_fwlog_block_read(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct sk_buff *skb;
+ ssize_t ret_cnt;
+ size_t len = 0, not_copied;
+ char *buf;
+ int ret;
+
+ buf = vmalloc(count);
+ if (!buf)
+ return -ENOMEM;
+
+ spin_lock(&ar->debug.fwlog_queue.lock);
+
+ if (skb_queue_len(&ar->debug.fwlog_queue) == 0) {
+ /* we must init under queue lock */
+ init_completion(&ar->debug.fwlog_completion);
+
+ spin_unlock(&ar->debug.fwlog_queue.lock);
+
+ ret = wait_for_completion_interruptible(
+ &ar->debug.fwlog_completion);
+		if (ret == -ERESTARTSYS) {
+			vfree(buf);
+			return ret;
+		}
+
+ spin_lock(&ar->debug.fwlog_queue.lock);
+ }
+
+ while ((skb = __skb_dequeue(&ar->debug.fwlog_queue))) {
+ if (skb->len > count - len) {
+ /* not enough space, put skb back and leave */
+ __skb_queue_head(&ar->debug.fwlog_queue, skb);
+ break;
+ }
+
+ memcpy(buf + len, skb->data, skb->len);
+ len += skb->len;
+
+ kfree_skb(skb);
+ }
+
+ spin_unlock(&ar->debug.fwlog_queue.lock);
+
+ /* FIXME: what to do if len == 0? */
+
+ not_copied = copy_to_user(user_buf, buf, len);
+ if (not_copied != 0) {
+ ret_cnt = -EFAULT;
+ goto out;
+ }
+
+ *ppos = *ppos + len;
+
+ ret_cnt = len;
+
+out:
+ vfree(buf);
+
+ return ret_cnt;
+}
+
+static const struct file_operations fops_fwlog_block = {
+ .open = ath6kl_fwlog_open,
+ .release = ath6kl_fwlog_release,
+ .read = ath6kl_fwlog_block_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
static ssize_t ath6kl_fwlog_mask_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
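The fwlog_block read handler above sleeps on ar->debug.fwlog_completion when the queue is empty; the completion is re-initialised while the queue lock is still held, so a producer cannot slip a complete() in between the emptiness check and the wait. The skeleton of that wait, as an illustrative outline only:

/* Sketch: block until at least one fwlog record is queued. */
static int fwlog_wait_for_data(struct ath6kl *ar)
{
	int ret = 0;

	spin_lock(&ar->debug.fwlog_queue.lock);

	if (skb_queue_len(&ar->debug.fwlog_queue) == 0) {
		/* re-arm under the lock so no wakeup can be lost */
		init_completion(&ar->debug.fwlog_completion);
		spin_unlock(&ar->debug.fwlog_queue.lock);

		ret = wait_for_completion_interruptible(
					&ar->debug.fwlog_completion);
	} else {
		spin_unlock(&ar->debug.fwlog_queue.lock);
	}

	return ret;
}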
@@ -637,9 +740,13 @@ static ssize_t ath6kl_endpoint_stats_read(struct file *file,
return -ENOMEM;
#define EPSTAT(name) \
- len = print_endpoint_stat(target, buf, buf_len, len, \
- offsetof(struct htc_endpoint_stats, name), \
- #name)
+ do { \
+ len = print_endpoint_stat(target, buf, buf_len, len, \
+ offsetof(struct htc_endpoint_stats, \
+ name), \
+ #name); \
+ } while (0)
+
EPSTAT(cred_low_indicate);
EPSTAT(tx_issued);
EPSTAT(tx_pkt_bundled);
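Wrapping the EPSTAT() expansion in do { ... } while (0) is the usual way to make a multi-statement macro behave as a single statement, so a call such as EPSTAT(tx_issued); stays safe inside an unbraced if/else. A generic illustration of the idiom (LOG_IF_SET and its arguments are invented for the example):

#include <linux/printk.h>
#include <linux/types.h>

/* Expands to exactly one statement and still requires a trailing ';'. */
#define LOG_IF_SET(flag, msg)				\
	do {						\
		if (flag)				\
			pr_info("%s\n", msg);		\
	} while (0)

static void report(bool enabled, bool verbose)
{
	if (enabled)
		LOG_IF_SET(verbose, "endpoint stats follow");
	else
		pr_info("stats disabled\n");
}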
@@ -749,17 +856,9 @@ static ssize_t ath6kl_regread_write(struct file *file,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
- u8 buf[50];
- unsigned int len;
unsigned long reg_addr;
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
-
- buf[len] = '\0';
-
- if (strict_strtoul(buf, 0, &reg_addr))
+ if (kstrtoul_from_user(user_buf, count, 0, &reg_addr))
return -EINVAL;
if ((reg_addr % 4) != 0)
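kstrtoul_from_user() folds the copy_from_user(), NUL-termination and string-to-number steps of the old code into a single call. A typical debugfs write handler using it looks roughly like this (my_reg_write is only a sketch mirroring the handler above; the alignment check follows the context line below):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t my_reg_write(struct file *file, const char __user *user_buf,
			    size_t count, loff_t *ppos)
{
	unsigned long addr;
	int ret;

	/* parse a decimal/hex string directly from user space */
	ret = kstrtoul_from_user(user_buf, count, 0, &addr);
	if (ret)
		return ret;

	if (addr % 4)
		return -EINVAL;	/* register addresses must be word aligned */

	/* ... act on addr ... */
	return count;
}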
@@ -873,15 +972,8 @@ static ssize_t ath6kl_lrssi_roam_write(struct file *file,
{
struct ath6kl *ar = file->private_data;
unsigned long lrssi_roam_threshold;
- char buf[32];
- ssize_t len;
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
-
- buf[len] = '\0';
- if (strict_strtoul(buf, 0, &lrssi_roam_threshold))
+ if (kstrtoul_from_user(user_buf, count, 0, &lrssi_roam_threshold))
return -EINVAL;
ar->lrssi_roam_threshold = lrssi_roam_threshold;
@@ -1411,6 +1503,8 @@ static ssize_t ath6kl_create_qos_write(struct file *file,
return -EINVAL;
pstream.medium_time = cpu_to_le32(val32);
+ pstream.nominal_phy = le32_to_cpu(pstream.min_phy_rate) / 1000000;
+
ath6kl_wmi_create_pstream_cmd(ar->wmi, vif->fw_vif_idx, &pstream);
return count;
@@ -1505,57 +1599,51 @@ static const struct file_operations fops_bgscan_int = {
};
static ssize_t ath6kl_listen_int_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
- u16 listen_int_t, listen_int_b;
+ struct ath6kl_vif *vif;
+ u16 listen_interval;
char buf[32];
- char *sptr, *token;
ssize_t len;
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
- sptr = buf;
-
- token = strsep(&sptr, " ");
- if (!token)
+ if (kstrtou16(buf, 0, &listen_interval))
return -EINVAL;
- if (kstrtou16(token, 0, &listen_int_t))
+ if ((listen_interval < 15) || (listen_interval > 3000))
return -EINVAL;
- if (kstrtou16(sptr, 0, &listen_int_b))
- return -EINVAL;
-
- if ((listen_int_t < 15) || (listen_int_t > 5000))
- return -EINVAL;
-
- if ((listen_int_b < 1) || (listen_int_b > 50))
- return -EINVAL;
-
- ar->listen_intvl_t = listen_int_t;
- ar->listen_intvl_b = listen_int_b;
-
- ath6kl_wmi_listeninterval_cmd(ar->wmi, 0, ar->listen_intvl_t,
- ar->listen_intvl_b);
+ vif->listen_intvl_t = listen_interval;
+ ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->listen_intvl_t, 0);
return count;
}
static ssize_t ath6kl_listen_int_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
+ struct ath6kl_vif *vif;
char buf[32];
int len;
- len = scnprintf(buf, sizeof(buf), "%u %u\n", ar->listen_intvl_t,
- ar->listen_intvl_b);
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", vif->listen_intvl_t);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -1628,33 +1716,29 @@ static const struct file_operations fops_power_params = {
.llseek = default_llseek,
};
-int ath6kl_debug_init(struct ath6kl *ar)
+void ath6kl_debug_init(struct ath6kl *ar)
{
- ar->debug.fwlog_buf.buf = vmalloc(ATH6KL_FWLOG_SIZE);
- if (ar->debug.fwlog_buf.buf == NULL)
- return -ENOMEM;
-
- ar->debug.fwlog_tmp = kmalloc(ATH6KL_FWLOG_SLOT_SIZE, GFP_KERNEL);
- if (ar->debug.fwlog_tmp == NULL) {
- vfree(ar->debug.fwlog_buf.buf);
- return -ENOMEM;
- }
-
- spin_lock_init(&ar->debug.fwlog_lock);
+ skb_queue_head_init(&ar->debug.fwlog_queue);
+ init_completion(&ar->debug.fwlog_completion);
/*
* Actually we are lying here but don't know how to read the mask
* value from the firmware.
*/
ar->debug.fwlog_mask = 0;
+}
+/*
+ * Initialisation needs to happen in two stages as fwlog events can come
+ * before cfg80211 is initialised, and debugfs depends on cfg80211
+ * initialisation.
+ */
+int ath6kl_debug_init_fs(struct ath6kl *ar)
+{
ar->debugfs_phy = debugfs_create_dir("ath6kl",
ar->wiphy->debugfsdir);
- if (!ar->debugfs_phy) {
- vfree(ar->debug.fwlog_buf.buf);
- kfree(ar->debug.fwlog_tmp);
+ if (!ar->debugfs_phy)
return -ENOMEM;
- }
debugfs_create_file("tgt_stats", S_IRUSR, ar->debugfs_phy, ar,
&fops_tgt_stats);
@@ -1668,6 +1752,9 @@ int ath6kl_debug_init(struct ath6kl *ar)
debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar,
&fops_fwlog);
+ debugfs_create_file("fwlog_block", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_fwlog_block);
+
debugfs_create_file("fwlog_mask", S_IRUSR | S_IWUSR, ar->debugfs_phy,
ar, &fops_fwlog_mask);
@@ -1702,24 +1789,26 @@ int ath6kl_debug_init(struct ath6kl *ar)
ar->debugfs_phy, ar, &fops_disconnect_timeout);
debugfs_create_file("create_qos", S_IWUSR, ar->debugfs_phy, ar,
- &fops_create_qos);
+ &fops_create_qos);
debugfs_create_file("delete_qos", S_IWUSR, ar->debugfs_phy, ar,
- &fops_delete_qos);
+ &fops_delete_qos);
debugfs_create_file("bgscan_interval", S_IWUSR,
- ar->debugfs_phy, ar, &fops_bgscan_int);
+ ar->debugfs_phy, ar, &fops_bgscan_int);
+
+ debugfs_create_file("listen_interval", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_listen_int);
debugfs_create_file("power_params", S_IWUSR, ar->debugfs_phy, ar,
- &fops_power_params);
+ &fops_power_params);
return 0;
}
void ath6kl_debug_cleanup(struct ath6kl *ar)
{
- vfree(ar->debug.fwlog_buf.buf);
- kfree(ar->debug.fwlog_tmp);
+ skb_queue_purge(&ar->debug.fwlog_queue);
kfree(ar->debug.roam_tbl);
}
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 9853c9c125c1..1803a0baae82 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -41,6 +42,7 @@ enum ATH6K_DEBUG_MASK {
ATH6KL_DBG_BOOT = BIT(18), /* driver init and fw boot */
ATH6KL_DBG_WMI_DUMP = BIT(19),
ATH6KL_DBG_SUSPEND = BIT(20),
+ ATH6KL_DBG_USB = BIT(21),
ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
};
@@ -55,35 +57,16 @@ int ath6kl_printk(const char *level, const char *fmt, ...);
#define ath6kl_warn(fmt, ...) \
ath6kl_printk(KERN_WARNING, fmt, ##__VA_ARGS__)
-#define AR_DBG_LVL_CHECK(mask) (debug_mask & mask)
-
enum ath6kl_war {
ATH6KL_WAR_INVALID_RATE,
};
#ifdef CONFIG_ATH6KL_DEBUG
-#define ath6kl_dbg(mask, fmt, ...) \
- ({ \
- int rtn; \
- if (debug_mask & mask) \
- rtn = ath6kl_printk(KERN_DEBUG, fmt, ##__VA_ARGS__); \
- else \
- rtn = 0; \
- \
- rtn; \
- })
-
-static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
- const char *msg, const char *prefix,
- const void *buf, size_t len)
-{
- if (debug_mask & mask) {
- if (msg)
- ath6kl_dbg(mask, "%s\n", msg);
- print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
- }
-}
+void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...);
+void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len);
void ath6kl_dump_registers(struct ath6kl_device *dev,
struct ath6kl_irq_proc_registers *irq_proc_reg,
@@ -95,7 +78,8 @@ int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
size_t len);
void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive);
void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout);
-int ath6kl_debug_init(struct ath6kl *ar);
+void ath6kl_debug_init(struct ath6kl *ar);
+int ath6kl_debug_init_fs(struct ath6kl *ar);
void ath6kl_debug_cleanup(struct ath6kl *ar);
#else
@@ -145,7 +129,11 @@ static inline void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar,
{
}
-static inline int ath6kl_debug_init(struct ath6kl *ar)
+static inline void ath6kl_debug_init(struct ath6kl *ar)
+{
+}
+
+static inline int ath6kl_debug_init_fs(struct ath6kl *ar)
{
return 0;
}
diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h
index 2fe1dadfc77a..fd84086638e3 100644
--- a/drivers/net/wireless/ath/ath6kl/hif-ops.h
+++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index e57da35e59fa..68ed6c2665b7 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2007-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -15,6 +16,8 @@
*/
#include "hif.h"
+#include <linux/export.h>
+
#include "core.h"
#include "target.h"
#include "hif-ops.h"
@@ -59,6 +62,8 @@ int ath6kl_hif_rw_comp_handler(void *context, int status)
return 0;
}
+EXPORT_SYMBOL(ath6kl_hif_rw_comp_handler);
+
#define REG_DUMP_COUNT_AR6003 60
#define REGISTER_DUMP_LEN_MAX 60
@@ -85,7 +90,7 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
}
ath6kl_dbg(ATH6KL_DBG_IRQ, "register dump data address 0x%x\n",
- regdump_addr);
+ regdump_addr);
regdump_addr = TARG_VTOP(ar->target_type, regdump_addr);
/* fetch register dump data */
@@ -102,9 +107,9 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
- for (i = 0; i < REG_DUMP_COUNT_AR6003 / 4; i++) {
+ for (i = 0; i < REG_DUMP_COUNT_AR6003; i += 4) {
ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
- 4 * i,
+ i,
le32_to_cpu(regdump_val[i]),
le32_to_cpu(regdump_val[i + 1]),
le32_to_cpu(regdump_val[i + 2]),
@@ -130,6 +135,7 @@ static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
ath6kl_warn("Failed to clear debug interrupt: %d\n", ret);
ath6kl_hif_dump_fw_crash(dev->ar);
+ ath6kl_read_fwlogs(dev->ar);
return ret;
}
@@ -279,7 +285,7 @@ static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev)
dev->irq_en_reg.cntr_int_status_en;
ath6kl_dbg(ATH6KL_DBG_IRQ,
- "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
+ "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
counter_int_status);
/*
@@ -354,7 +360,7 @@ static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev)
}
ath6kl_dbg(ATH6KL_DBG_IRQ,
- "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
+ "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
cpu_int_status);
/* Clear the interrupt */
@@ -429,9 +435,8 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
if (status)
goto out;
- if (AR_DBG_LVL_CHECK(ATH6KL_DBG_IRQ))
- ath6kl_dump_registers(dev, &dev->irq_proc_reg,
- &dev->irq_en_reg);
+ ath6kl_dump_registers(dev, &dev->irq_proc_reg,
+ &dev->irq_en_reg);
/* Update only those registers that are enabled */
host_int_status = dev->irq_proc_reg.host_int_status &
@@ -561,6 +566,7 @@ int ath6kl_hif_intr_bh_handler(struct ath6kl *ar)
return status;
}
+EXPORT_SYMBOL(ath6kl_hif_intr_bh_handler);
static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev)
{
@@ -689,6 +695,11 @@ int ath6kl_hif_setup(struct ath6kl_device *dev)
ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
+ /* usb doesn't support enabling interrupts */
+ /* FIXME: remove check once USB support is implemented */
+ if (dev->ar->hif_type == ATH6KL_HIF_TYPE_USB)
+ return 0;
+
status = ath6kl_hif_disable_intrs(dev);
fail_setup:
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index 699a036f3a44..20ed6b73517b 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -197,6 +198,8 @@ struct hif_scatter_req {
u8 *virt_dma_buf;
struct hif_scatter_item scat_list[1];
+
+ u32 scat_q_depth;
};
struct ath6kl_irq_proc_registers {
@@ -220,6 +223,7 @@ struct ath6kl_irq_enable_reg {
} __packed;
struct ath6kl_device {
+ /* protects irq_proc_reg and irq_en_reg below */
spinlock_t lock;
struct ath6kl_irq_proc_registers irq_proc_reg;
struct ath6kl_irq_enable_reg irq_en_reg;
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc.c
index f3b63ca25c7e..4849d99cce77 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.c
+++ b/drivers/net/wireless/ath/ath6kl/htc.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2007-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -22,6 +23,9 @@
#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
+/* threshold to re-enable Tx bundling for an AC */
+#define TX_RESUME_BUNDLE_THRESHOLD 1500
+
/* Functions for Tx credit handling */
static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
struct htc_endpoint_credit_dist *ep_dist,
@@ -168,31 +172,29 @@ static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
struct list_head *epdist_list)
{
- struct htc_endpoint_credit_dist *cur_dist_list;
+ struct htc_endpoint_credit_dist *cur_list;
- list_for_each_entry(cur_dist_list, epdist_list, list) {
- if (cur_dist_list->endpoint == ENDPOINT_0)
+ list_for_each_entry(cur_list, epdist_list, list) {
+ if (cur_list->endpoint == ENDPOINT_0)
continue;
- if (cur_dist_list->cred_to_dist > 0) {
- cur_dist_list->credits +=
- cur_dist_list->cred_to_dist;
- cur_dist_list->cred_to_dist = 0;
- if (cur_dist_list->credits >
- cur_dist_list->cred_assngd)
+ if (cur_list->cred_to_dist > 0) {
+ cur_list->credits += cur_list->cred_to_dist;
+ cur_list->cred_to_dist = 0;
+
+ if (cur_list->credits > cur_list->cred_assngd)
ath6kl_credit_reduce(cred_info,
- cur_dist_list,
- cur_dist_list->cred_assngd);
+ cur_list,
+ cur_list->cred_assngd);
- if (cur_dist_list->credits >
- cur_dist_list->cred_norm)
- ath6kl_credit_reduce(cred_info, cur_dist_list,
- cur_dist_list->cred_norm);
+ if (cur_list->credits > cur_list->cred_norm)
+ ath6kl_credit_reduce(cred_info, cur_list,
+ cur_list->cred_norm);
- if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
- if (cur_dist_list->txq_depth == 0)
+ if (!(cur_list->dist_flags & HTC_EP_ACTIVE)) {
+ if (cur_list->txq_depth == 0)
ath6kl_credit_reduce(cred_info,
- cur_dist_list, 0);
+ cur_list, 0);
}
}
}
@@ -460,8 +462,8 @@ static void htc_async_tx_scat_complete(struct htc_target *target,
INIT_LIST_HEAD(&tx_compq);
ath6kl_dbg(ATH6KL_DBG_HTC,
- "htc tx scat complete len %d entries %d\n",
- scat_req->len, scat_req->scat_entries);
+ "htc tx scat complete len %d entries %d\n",
+ scat_req->len, scat_req->scat_entries);
if (scat_req->status)
ath6kl_err("send scatter req failed: %d\n", scat_req->status);
@@ -599,8 +601,8 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
list);
ath6kl_dbg(ATH6KL_DBG_HTC,
- "htc tx got packet 0x%p queue depth %d\n",
- packet, get_queue_depth(&endpoint->txq));
+ "htc tx got packet 0x%p queue depth %d\n",
+ packet, get_queue_depth(&endpoint->txq));
len = CALC_TXRX_PADDED_LEN(target,
packet->act_len + HTC_HDR_LENGTH);
@@ -670,6 +672,7 @@ static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
struct htc_packet *packet;
int i, len, rem_scat, cred_pad;
int status = 0;
+ u8 flags;
rem_scat = target->max_tx_bndl_sz;
@@ -696,9 +699,9 @@ static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
scat_req->scat_list[i].packet = packet;
/* prepare packet and flag message as part of a send bundle */
- ath6kl_htc_tx_prep_pkt(packet,
- packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
- cred_pad, packet->info.tx.seqno);
+ flags = packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE;
+ ath6kl_htc_tx_prep_pkt(packet, flags,
+ cred_pad, packet->info.tx.seqno);
/* Make sure the buffer is 4-byte aligned */
ath6kl_htc_tx_buf_align(&packet->buf,
packet->act_len + HTC_HDR_LENGTH);
@@ -744,6 +747,12 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
struct hif_scatter_req *scat_req = NULL;
int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
int status;
+ u32 txb_mask;
+ u8 ac = WMM_NUM_AC;
+
+	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
+	    (WMI_CONTROL_SVC != endpoint->svc_id))
+ ac = target->dev->ar->ep2ac_map[endpoint->eid];
while (true) {
status = 0;
@@ -759,10 +768,35 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
if (!scat_req) {
/* no scatter resources */
ath6kl_dbg(ATH6KL_DBG_HTC,
- "htc tx no more scatter resources\n");
+ "htc tx no more scatter resources\n");
break;
}
+ if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) {
+ if (WMM_AC_BE == ac)
+ /*
+ * BE, BK have priorities and bit
+ * positions reversed
+ */
+ txb_mask = (1 << WMM_AC_BK);
+ else
+ /*
+ * any AC with priority lower than
+ * itself
+ */
+ txb_mask = ((1 << ac) - 1);
+ /*
+ * when the scatter request resources drop below a
+ * certain threshold, disable Tx bundling for all
+			 * ACs with priority lower than the current requesting
+ * AC. Otherwise re-enable Tx bundling for them
+ */
+ if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
+ target->tx_bndl_mask &= ~txb_mask;
+ else
+ target->tx_bndl_mask |= txb_mask;
+ }
+
ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
n_scat);
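The new code above turns the single tx_bndl_enable flag into a per-AC bitmask: when the scatter request pool runs low, the requesting AC switches bundling off for every AC of lower priority, and switches it back on once the pool recovers. The mask computation, pulled out into a helper purely for illustration (the patch keeps it inline):

/* Bitmask of ACs with lower priority than 'ac'; BE and BK have their
 * numeric order and their priority order swapped, as noted above. */
static u32 lower_prio_ac_mask(u8 ac)
{
	if (ac == WMM_AC_BE)
		return 1 << WMM_AC_BK;

	return (1 << ac) - 1;	/* every AC numerically below 'ac' */
}

The caller then clears those bits in target->tx_bndl_mask while scat_req->scat_q_depth is below ATH6KL_SCATTER_REQS and sets them again once it recovers.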
@@ -806,6 +840,7 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
struct htc_packet *packet;
int bundle_sent;
int n_pkts_bundle;
+ u8 ac = WMM_NUM_AC;
spin_lock_bh(&target->tx_lock);
@@ -823,6 +858,10 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
*/
INIT_LIST_HEAD(&txq);
+	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
+	    (WMI_CONTROL_SVC != endpoint->svc_id))
+ ac = target->dev->ar->ep2ac_map[endpoint->eid];
+
while (true) {
if (list_empty(&endpoint->txq))
@@ -840,15 +879,18 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
while (true) {
/* try to send a bundle on each pass */
- if ((target->tx_bndl_enable) &&
+ if ((target->tx_bndl_mask) &&
(get_queue_depth(&txq) >=
HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
int temp1 = 0, temp2 = 0;
- ath6kl_htc_tx_bundle(endpoint, &txq,
- &temp1, &temp2);
- bundle_sent += temp1;
- n_pkts_bundle += temp2;
+ /* check if bundling is enabled for an AC */
+ if (target->tx_bndl_mask & (1 << ac)) {
+ ath6kl_htc_tx_bundle(endpoint, &txq,
+ &temp1, &temp2);
+ bundle_sent += temp1;
+ n_pkts_bundle += temp2;
+ }
}
if (list_empty(&txq))
@@ -867,6 +909,26 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
endpoint->ep_st.tx_bundles += bundle_sent;
endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
+
+ /*
+ * if an AC has bundling disabled and no tx bundling
+	 * has occurred continuously for a certain number of TX,
+ * enable tx bundling for this AC
+ */
+ if (!bundle_sent) {
+ if (!(target->tx_bndl_mask & (1 << ac)) &&
+ (ac < WMM_NUM_AC)) {
+ if (++target->ac_tx_count[ac] >=
+ TX_RESUME_BUNDLE_THRESHOLD) {
+ target->ac_tx_count[ac] = 0;
+ target->tx_bndl_mask |= (1 << ac);
+ }
+ }
+ } else {
+ /* tx bundling will reset the counter */
+ if (ac < WMM_NUM_AC)
+ target->ac_tx_count[ac] = 0;
+ }
}
endpoint->tx_proc_cnt = 0;
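The counter added here gives the disable/enable decision some hysteresis: once an AC has gone TX_RESUME_BUNDLE_THRESHOLD consecutive sends without any bundle going out, bundling is re-enabled for it, and any successful bundle resets its counter. The same logic as a standalone helper (only an illustration of the code above, not part of the patch):

static void tx_bundle_hysteresis(struct htc_target *target, u8 ac,
				 int bundle_sent)
{
	if (ac >= WMM_NUM_AC)
		return;

	if (bundle_sent) {
		/* a bundle went out, start counting from scratch */
		target->ac_tx_count[ac] = 0;
		return;
	}

	if (!(target->tx_bndl_mask & (1 << ac)) &&
	    ++target->ac_tx_count[ac] >= TX_RESUME_BUNDLE_THRESHOLD) {
		target->ac_tx_count[ac] = 0;
		target->tx_bndl_mask |= 1 << ac;
	}
}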
@@ -979,8 +1041,8 @@ static int htc_setup_tx_complete(struct htc_target *target)
memcpy(&setup_comp_ext->flags, &flags,
sizeof(setup_comp_ext->flags));
set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
- sizeof(struct htc_setup_comp_ext_msg),
- ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+ sizeof(struct htc_setup_comp_ext_msg),
+ ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
} else {
struct htc_setup_comp_msg *setup_comp;
@@ -988,8 +1050,8 @@ static int htc_setup_tx_complete(struct htc_target *target)
memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
- sizeof(struct htc_setup_comp_msg),
- ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+ sizeof(struct htc_setup_comp_msg),
+ ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
}
/* we want synchronous operation */
@@ -1088,9 +1150,9 @@ void ath6kl_htc_flush_txep(struct htc_target *target,
packet->status = -ECANCELED;
list_del(&packet->list);
ath6kl_dbg(ATH6KL_DBG_HTC,
- "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
- packet, packet->act_len,
- packet->endpoint, packet->info.tx.tag);
+ "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
+ packet, packet->act_len,
+ packet->endpoint, packet->info.tx.tag);
INIT_LIST_HEAD(&container);
list_add_tail(&packet->list, &container);
@@ -1490,7 +1552,7 @@ static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
if (packets->act_len > 0) {
ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
- packets->act_len + HTC_HDR_LENGTH);
+ packets->act_len + HTC_HDR_LENGTH);
ath6kl_dbg_dump(ATH6KL_DBG_HTC,
"htc rx unexpected endpoint 0 message", "",
@@ -1609,8 +1671,8 @@ static int htc_parse_trailer(struct htc_target *target,
}
lk_ahd = (struct htc_lookahead_report *) record_buf;
- if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
- && next_lk_ahds) {
+ if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
+ next_lk_ahds) {
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
@@ -2038,13 +2100,13 @@ fail_rx:
list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
list_del(&packet->list);
htc_reclaim_rxbuf(target, packet,
- &target->endpoint[packet->endpoint]);
+ &target->endpoint[packet->endpoint]);
}
list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
list_del(&packet->list);
htc_reclaim_rxbuf(target, packet,
- &target->endpoint[packet->endpoint]);
+ &target->endpoint[packet->endpoint]);
}
return status;
@@ -2062,6 +2124,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
enum htc_endpoint_id id;
int n_fetched = 0;
+ INIT_LIST_HEAD(&comp_pktq);
*num_pkts = 0;
/*
@@ -2175,11 +2238,11 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
u32 look_ahead;
if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
- HTC_TARGET_RESPONSE_TIMEOUT))
+ HTC_TARGET_RESPONSE_TIMEOUT))
return NULL;
ath6kl_dbg(ATH6KL_DBG_HTC,
- "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
+ "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
htc_hdr = (struct htc_frame_hdr *)&look_ahead;
@@ -2244,7 +2307,7 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
depth = get_queue_depth(pkt_queue);
ath6kl_dbg(ATH6KL_DBG_HTC,
- "htc rx add multiple ep id %d cnt %d len %d\n",
+ "htc rx add multiple ep id %d cnt %d len %d\n",
first_pkt->endpoint, depth, first_pkt->buf_len);
endpoint = &target->endpoint[first_pkt->endpoint];
@@ -2270,8 +2333,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
if (target->ep_waiting == first_pkt->endpoint) {
ath6kl_dbg(ATH6KL_DBG_HTC,
- "htc rx blocked on ep %d, unblocking\n",
- target->ep_waiting);
+ "htc rx blocked on ep %d, unblocking\n",
+ target->ep_waiting);
target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
target->ep_waiting = ENDPOINT_MAX;
rx_unblock = true;
@@ -2308,7 +2371,21 @@ void ath6kl_htc_flush_rx_buf(struct htc_target *target)
"htc rx flush pkt 0x%p len %d ep %d\n",
packet, packet->buf_len,
packet->endpoint);
- dev_kfree_skb(packet->pkt_cntxt);
+ /*
+ * packets in rx_bufq of endpoint 0 have originally
+ * been queued from target->free_ctrl_rxbuf where
+ * packet and packet->buf_start are allocated
+ * separately using kmalloc(). For other endpoint
+ * rx_bufq, it is allocated as skb where packet is
+ * skb->head. Take care of this difference while freeing
+ * the memory.
+ */
+ if (packet->endpoint == ENDPOINT_0) {
+ kfree(packet->buf_start);
+ kfree(packet);
+ } else {
+ dev_kfree_skb(packet->pkt_cntxt);
+ }
spin_lock_bh(&target->rx_lock);
}
spin_unlock_bh(&target->rx_lock);
@@ -2327,6 +2404,7 @@ int ath6kl_htc_conn_service(struct htc_target *target,
enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
unsigned int max_msg_sz = 0;
int status = 0;
+ u16 msg_id;
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc connect service target 0x%p service id 0x%x\n",
@@ -2370,9 +2448,10 @@ int ath6kl_htc_conn_service(struct htc_target *target,
}
resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
+ msg_id = le16_to_cpu(resp_msg->msg_id);
- if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
- || (rx_pkt->act_len < sizeof(*resp_msg))) {
+ if ((msg_id != HTC_MSG_CONN_SVC_RESP_ID) ||
+ (rx_pkt->act_len < sizeof(*resp_msg))) {
status = -ENOMEM;
goto fail_tx;
}
@@ -2419,6 +2498,15 @@ int ath6kl_htc_conn_service(struct htc_target *target,
endpoint->cred_dist.endpoint = assigned_ep;
endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
+ switch (endpoint->svc_id) {
+ case WMI_DATA_BK_SVC:
+ endpoint->tx_drop_packet_threshold = MAX_DEF_COOKIE_NUM / 3;
+ break;
+ default:
+ endpoint->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
+ break;
+ }
+
if (conn_req->max_rxmsg_sz) {
/*
* Override cred_per_msg calculation, this optimizes
@@ -2516,7 +2604,8 @@ static void htc_setup_msg_bndl(struct htc_target *target)
target->max_rx_bndl_sz, target->max_tx_bndl_sz);
if (target->max_tx_bndl_sz)
- target->tx_bndl_enable = true;
+ /* tx_bndl_mask is enabled per AC, each has 1 bit */
+ target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1;
if (target->max_rx_bndl_sz)
target->rx_bndl_enable = true;
@@ -2531,7 +2620,7 @@ static void htc_setup_msg_bndl(struct htc_target *target)
* padding will spill into the next credit buffer
* which is fatal.
*/
- target->tx_bndl_enable = false;
+ target->tx_bndl_mask = 0;
}
}
@@ -2543,6 +2632,12 @@ int ath6kl_htc_wait_target(struct htc_target *target)
struct htc_service_connect_resp resp;
int status;
+ /* FIXME: remove once USB support is implemented */
+ if (target->dev->ar->hif_type == ATH6KL_HIF_TYPE_USB) {
+ ath6kl_err("HTC doesn't support USB yet. Patience!\n");
+ return -EOPNOTSUPP;
+ }
+
/* we should be getting 1 control message that the target is ready */
packet = htc_wait_for_ctrl_msg(target);
@@ -2582,8 +2677,8 @@ int ath6kl_htc_wait_target(struct htc_target *target)
}
ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
- (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
- target->htc_tgt_ver);
+ (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
+ target->htc_tgt_ver);
if (target->msg_per_bndl_max > 0)
htc_setup_msg_bndl(target);
@@ -2772,17 +2867,19 @@ void ath6kl_htc_cleanup(struct htc_target *target)
{
struct htc_packet *packet, *tmp_packet;
- ath6kl_hif_cleanup_scatter(target->dev->ar);
+ /* FIXME: remove check once USB support is implemented */
+ if (target->dev->ar->hif_type != ATH6KL_HIF_TYPE_USB)
+ ath6kl_hif_cleanup_scatter(target->dev->ar);
list_for_each_entry_safe(packet, tmp_packet,
- &target->free_ctrl_txbuf, list) {
+ &target->free_ctrl_txbuf, list) {
list_del(&packet->list);
kfree(packet->buf_start);
kfree(packet);
}
list_for_each_entry_safe(packet, tmp_packet,
- &target->free_ctrl_rxbuf, list) {
+ &target->free_ctrl_rxbuf, list) {
list_del(&packet->list);
kfree(packet->buf_start);
kfree(packet);
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index 57672e1ed1a6..5027ccc36b62 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -87,6 +88,8 @@
#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4)
#define WMI_MAX_SERVICES 5
+#define WMM_NUM_AC 4
+
/* reserved and used to flush ALL packets */
#define HTC_TX_PACKET_TAG_ALL 0
#define HTC_SERVICE_TX_PACKET_TAG 1
@@ -498,6 +501,7 @@ struct htc_endpoint {
u8 seqno;
u32 conn_flags;
struct htc_endpoint_stats ep_st;
+ u16 tx_drop_packet_threshold;
};
struct htc_control_buffer {
@@ -519,9 +523,16 @@ struct htc_target {
struct ath6kl_htc_credit_info *credit_info;
int tgt_creds;
unsigned int tgt_cred_sz;
+
+ /* protects free_ctrl_txbuf and free_ctrl_rxbuf */
spinlock_t htc_lock;
+
+	/* FIXME: does this protect rx_bufq and endpoint structures or what? */
spinlock_t rx_lock;
+
+ /* protects endpoint->txq */
spinlock_t tx_lock;
+
struct ath6kl_device *dev;
u32 htc_flags;
u32 rx_st_flags;
@@ -531,7 +542,7 @@ struct htc_target {
/* max messages per bundle for HTC */
int msg_per_bndl_max;
- bool tx_bndl_enable;
+ u32 tx_bndl_mask;
int rx_bndl_enable;
int max_rx_bndl_sz;
int max_tx_bndl_sz;
@@ -543,6 +554,9 @@ struct htc_target {
int max_xfer_szper_scatreq;
int chk_irq_status_cnt;
+
+	/* counts the number of Tx without bundling continuously per AC */
+ u32 ac_tx_count[WMM_NUM_AC];
};
void *ath6kl_htc_create(struct ath6kl *ar);
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 7f55be3092d1..03cae142f178 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -17,22 +18,16 @@
#include <linux/moduleparam.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/of.h>
#include <linux/mmc/sdio_func.h>
+
#include "core.h"
#include "cfg80211.h"
#include "target.h"
#include "debug.h"
#include "hif-ops.h"
-unsigned int debug_mask;
-static unsigned int testmode;
-static bool suspend_cutpower;
-
-module_param(debug_mask, uint, 0644);
-module_param(testmode, uint, 0644);
-module_param(suspend_cutpower, bool, 0444);
-
static const struct ath6kl_hw hw_list[] = {
{
.id = AR6003_HW_2_0_VERSION,
@@ -47,11 +42,14 @@ static const struct ath6kl_hw hw_list[] = {
/* hw2.0 needs override address hardcoded */
.app_start_override_addr = 0x944C00,
- .fw_otp = AR6003_HW_2_0_OTP_FILE,
- .fw = AR6003_HW_2_0_FIRMWARE_FILE,
- .fw_tcmd = AR6003_HW_2_0_TCMD_FIRMWARE_FILE,
- .fw_patch = AR6003_HW_2_0_PATCH_FILE,
- .fw_api2 = AR6003_HW_2_0_FIRMWARE_2_FILE,
+ .fw = {
+ .dir = AR6003_HW_2_0_FW_DIR,
+ .otp = AR6003_HW_2_0_OTP_FILE,
+ .fw = AR6003_HW_2_0_FIRMWARE_FILE,
+ .tcmd = AR6003_HW_2_0_TCMD_FIRMWARE_FILE,
+ .patch = AR6003_HW_2_0_PATCH_FILE,
+ },
+
.fw_board = AR6003_HW_2_0_BOARD_DATA_FILE,
.fw_default_board = AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE,
},
@@ -64,14 +62,20 @@ static const struct ath6kl_hw hw_list[] = {
.reserved_ram_size = 512,
.refclk_hz = 26000000,
.uarttx_pin = 8,
+ .testscript_addr = 0x57ef74,
+
+ .fw = {
+ .dir = AR6003_HW_2_1_1_FW_DIR,
+ .otp = AR6003_HW_2_1_1_OTP_FILE,
+ .fw = AR6003_HW_2_1_1_FIRMWARE_FILE,
+ .tcmd = AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE,
+ .patch = AR6003_HW_2_1_1_PATCH_FILE,
+ .utf = AR6003_HW_2_1_1_UTF_FIRMWARE_FILE,
+ .testscript = AR6003_HW_2_1_1_TESTSCRIPT_FILE,
+ },
- .fw_otp = AR6003_HW_2_1_1_OTP_FILE,
- .fw = AR6003_HW_2_1_1_FIRMWARE_FILE,
- .fw_tcmd = AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE,
- .fw_patch = AR6003_HW_2_1_1_PATCH_FILE,
- .fw_api2 = AR6003_HW_2_1_1_FIRMWARE_2_FILE,
.fw_board = AR6003_HW_2_1_1_BOARD_DATA_FILE,
- .fw_default_board = AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE,
+ .fw_default_board = AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE,
},
{
.id = AR6004_HW_1_0_VERSION,
@@ -84,8 +88,11 @@ static const struct ath6kl_hw hw_list[] = {
.refclk_hz = 26000000,
.uarttx_pin = 11,
- .fw = AR6004_HW_1_0_FIRMWARE_FILE,
- .fw_api2 = AR6004_HW_1_0_FIRMWARE_2_FILE,
+ .fw = {
+ .dir = AR6004_HW_1_0_FW_DIR,
+ .fw = AR6004_HW_1_0_FIRMWARE_FILE,
+ },
+
.fw_board = AR6004_HW_1_0_BOARD_DATA_FILE,
.fw_default_board = AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE,
},
@@ -100,8 +107,11 @@ static const struct ath6kl_hw hw_list[] = {
.refclk_hz = 40000000,
.uarttx_pin = 11,
- .fw = AR6004_HW_1_1_FIRMWARE_FILE,
- .fw_api2 = AR6004_HW_1_1_FIRMWARE_2_FILE,
+ .fw = {
+ .dir = AR6004_HW_1_1_FW_DIR,
+ .fw = AR6004_HW_1_1_FIRMWARE_FILE,
+ },
+
.fw_board = AR6004_HW_1_1_BOARD_DATA_FILE,
.fw_default_board = AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE,
},
@@ -342,11 +352,7 @@ static int ath6kl_set_htc_params(struct ath6kl *ar, u32 mbox_isr_yield_val,
blk_size |= ((u32)htc_ctrl_buf) << 16;
/* set the host interest area for the block size */
- status = ath6kl_bmi_write(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_mbox_io_block_sz)),
- (u8 *)&blk_size,
- 4);
+ status = ath6kl_bmi_write_hi32(ar, hi_mbox_io_block_sz, blk_size);
if (status) {
ath6kl_err("bmi_write_memory for IO block size failed\n");
goto out;
@@ -358,11 +364,8 @@ static int ath6kl_set_htc_params(struct ath6kl *ar, u32 mbox_isr_yield_val,
if (mbox_isr_yield_val) {
/* set the host interest area for the mbox ISR yield limit */
- status = ath6kl_bmi_write(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_mbox_isr_yield_limit)),
- (u8 *)&mbox_isr_yield_val,
- 4);
+ status = ath6kl_bmi_write_hi32(ar, hi_mbox_isr_yield_limit,
+ mbox_isr_yield_val);
if (status) {
ath6kl_err("bmi_write_memory for yield limit failed\n");
goto out;
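ath6kl_bmi_write_hi32() used above collapses the repeated "resolve the host-interest item address, then BMI-write a 32-bit value" sequence. Its definition is not part of this hunk; it is presumably a thin wrapper along these lines (treat the exact shape as an assumption):

/* Assumed shape of the helper: write one 32-bit host-interest item,
 * named via HI_ITEM() rather than passed as a raw target address. */
#define ath6kl_bmi_write_hi32(ar, item, val)				\
	({								\
		u32 addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item));	\
		u32 v = (val);						\
									\
		ath6kl_bmi_write(ar, addr, (u8 *) &v, sizeof(v));	\
	})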
@@ -375,7 +378,6 @@ out:
static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
{
- int status = 0;
int ret;
/*
@@ -383,43 +385,54 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
* default values. Required if checksum offload is needed. Set
* RxMetaVersion to 2.
*/
- if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, idx,
- ar->rx_meta_ver, 0, 0)) {
- ath6kl_err("unable to set the rx frame format\n");
- status = -EIO;
+ ret = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, idx,
+ ar->rx_meta_ver, 0, 0);
+ if (ret) {
+ ath6kl_err("unable to set the rx frame format: %d\n", ret);
+ return ret;
}
- if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN)
- if ((ath6kl_wmi_pmparams_cmd(ar->wmi, idx, 0, 1, 0, 0, 1,
- IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) {
- ath6kl_err("unable to set power save fail event policy\n");
- status = -EIO;
+ if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN) {
+ ret = ath6kl_wmi_pmparams_cmd(ar->wmi, idx, 0, 1, 0, 0, 1,
+ IGNORE_PS_FAIL_DURING_SCAN);
+ if (ret) {
+ ath6kl_err("unable to set power save fail event policy: %d\n",
+ ret);
+ return ret;
}
+ }
- if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER))
- if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, idx, 0,
- WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) {
- ath6kl_err("unable to set barker preamble policy\n");
- status = -EIO;
+ if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER)) {
+ ret = ath6kl_wmi_set_lpreamble_cmd(ar->wmi, idx, 0,
+ WMI_FOLLOW_BARKER_IN_ERP);
+ if (ret) {
+ ath6kl_err("unable to set barker preamble policy: %d\n",
+ ret);
+ return ret;
}
+ }
- if (ath6kl_wmi_set_keepalive_cmd(ar->wmi, idx,
- WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) {
- ath6kl_err("unable to set keep alive interval\n");
- status = -EIO;
+ ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, idx,
+ WLAN_CONFIG_KEEP_ALIVE_INTERVAL);
+ if (ret) {
+ ath6kl_err("unable to set keep alive interval: %d\n", ret);
+ return ret;
}
- if (ath6kl_wmi_disctimeout_cmd(ar->wmi, idx,
- WLAN_CONFIG_DISCONNECT_TIMEOUT)) {
- ath6kl_err("unable to set disconnect timeout\n");
- status = -EIO;
+ ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, idx,
+ WLAN_CONFIG_DISCONNECT_TIMEOUT);
+ if (ret) {
+ ath6kl_err("unable to set disconnect timeout: %d\n", ret);
+ return ret;
}
- if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST))
- if (ath6kl_wmi_set_wmm_txop(ar->wmi, idx, WMI_TXOP_DISABLED)) {
- ath6kl_err("unable to set txop bursting\n");
- status = -EIO;
+ if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST)) {
+ ret = ath6kl_wmi_set_wmm_txop(ar->wmi, idx, WMI_TXOP_DISABLED);
+ if (ret) {
+ ath6kl_err("unable to set txop bursting: %d\n", ret);
+ return ret;
}
+ }
if (ar->p2p && (ar->vif_max == 1 || idx)) {
ret = ath6kl_wmi_info_req_cmd(ar->wmi, idx,
@@ -443,7 +456,7 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
}
}
- return status;
+ return ret;
}
int ath6kl_configure_target(struct ath6kl *ar)
@@ -452,6 +465,12 @@ int ath6kl_configure_target(struct ath6kl *ar)
u8 fw_iftype, fw_mode = 0, fw_submode = 0;
int i, status;
+ param = !!(ar->conf_flags & ATH6KL_CONF_UART_DEBUG);
+ if (ath6kl_bmi_write_hi32(ar, hi_serial_enable, param)) {
+ ath6kl_err("bmi_write_memory for uart debug failed\n");
+ return -EIO;
+ }
+
/*
* Note: Even though the firmware interface type is
* chosen as BSS_STA for all three interfaces, can
@@ -483,11 +502,8 @@ int ath6kl_configure_target(struct ath6kl *ar)
if (ar->p2p && ar->vif_max == 1)
fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV;
- param = HTC_PROTOCOL_VERSION;
- if (ath6kl_bmi_write(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_app_host_interest)),
- (u8 *)&param, 4) != 0) {
+ if (ath6kl_bmi_write_hi32(ar, hi_app_host_interest,
+ HTC_PROTOCOL_VERSION) != 0) {
ath6kl_err("bmi_write_memory for htc version failed\n");
return -EIO;
}
@@ -495,10 +511,7 @@ int ath6kl_configure_target(struct ath6kl *ar)
/* set the firmware mode to STA/IBSS/AP */
param = 0;
- if (ath6kl_bmi_read(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_option_flag)),
- (u8 *)&param, 4) != 0) {
+ if (ath6kl_bmi_read_hi32(ar, hi_option_flag, &param) != 0) {
ath6kl_err("bmi_read_memory for setting fwmode failed\n");
return -EIO;
}
@@ -510,11 +523,7 @@ int ath6kl_configure_target(struct ath6kl *ar)
param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
- if (ath6kl_bmi_write(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_option_flag)),
- (u8 *)&param,
- 4) != 0) {
+ if (ath6kl_bmi_write_hi32(ar, hi_option_flag, param) != 0) {
ath6kl_err("bmi_write_memory for setting fwmode failed\n");
return -EIO;
}
@@ -533,16 +542,13 @@ int ath6kl_configure_target(struct ath6kl *ar)
param = ar->hw.board_ext_data_addr;
ram_reserved_size = ar->hw.reserved_ram_size;
- if (ath6kl_bmi_write(ar, ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_board_ext_data)),
- (u8 *)&param, 4) != 0) {
+ if (ath6kl_bmi_write_hi32(ar, hi_board_ext_data, param) != 0) {
ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n");
return -EIO;
}
- if (ath6kl_bmi_write(ar, ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_end_ram_reserve_sz)),
- (u8 *)&ram_reserved_size, 4) != 0) {
+ if (ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz,
+ ram_reserved_size) != 0) {
ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n");
return -EIO;
}
@@ -553,56 +559,19 @@ int ath6kl_configure_target(struct ath6kl *ar)
return -EIO;
/* Configure GPIO AR600x UART */
- param = ar->hw.uarttx_pin;
- status = ath6kl_bmi_write(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_dbg_uart_txpin)),
- (u8 *)&param, 4);
+ status = ath6kl_bmi_write_hi32(ar, hi_dbg_uart_txpin,
+ ar->hw.uarttx_pin);
if (status)
return status;
/* Configure target refclk_hz */
- param = ar->hw.refclk_hz;
- status = ath6kl_bmi_write(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_refclk_hz)),
- (u8 *)&param, 4);
+ status = ath6kl_bmi_write_hi32(ar, hi_refclk_hz, ar->hw.refclk_hz);
if (status)
return status;
return 0;
}
-void ath6kl_core_free(struct ath6kl *ar)
-{
- wiphy_free(ar->wiphy);
-}
-
-void ath6kl_core_cleanup(struct ath6kl *ar)
-{
- ath6kl_hif_power_off(ar);
-
- destroy_workqueue(ar->ath6kl_wq);
-
- if (ar->htc_target)
- ath6kl_htc_cleanup(ar->htc_target);
-
- ath6kl_cookie_cleanup(ar);
-
- ath6kl_cleanup_amsdu_rxbufs(ar);
-
- ath6kl_bmi_cleanup(ar);
-
- ath6kl_debug_cleanup(ar);
-
- kfree(ar->fw_board);
- kfree(ar->fw_otp);
- kfree(ar->fw);
- kfree(ar->fw_patch);
-
- ath6kl_deinit_ieee80211_hw(ar);
-}
-
/* firmware upload */
static int ath6kl_get_fw(struct ath6kl *ar, const char *filename,
u8 **fw, size_t *fw_len)
@@ -626,21 +595,6 @@ static int ath6kl_get_fw(struct ath6kl *ar, const char *filename,
}
#ifdef CONFIG_OF
-static const char *get_target_ver_dir(const struct ath6kl *ar)
-{
- switch (ar->version.target_ver) {
- case AR6003_HW_1_0_VERSION:
- return "ath6k/AR6003/hw1.0";
- case AR6003_HW_2_0_VERSION:
- return "ath6k/AR6003/hw2.0";
- case AR6003_HW_2_1_1_VERSION:
- return "ath6k/AR6003/hw2.1.1";
- }
- ath6kl_warn("%s: unsupported target version 0x%x.\n", __func__,
- ar->version.target_ver);
- return NULL;
-}
-
/*
* Check the device tree for a board-id and use it to construct
* the pathname to the firmware file. Used (for now) to find a
@@ -663,7 +617,7 @@ static bool check_device_tree(struct ath6kl *ar)
continue;
}
snprintf(board_filename, sizeof(board_filename),
- "%s/bdata.%s.bin", get_target_ver_dir(ar), board_id);
+ "%s/bdata.%s.bin", ar->hw.fw.dir, board_id);
ret = ath6kl_get_fw(ar, board_filename, &ar->fw_board,
&ar->fw_board_len);
@@ -730,19 +684,20 @@ static int ath6kl_fetch_board_file(struct ath6kl *ar)
static int ath6kl_fetch_otp_file(struct ath6kl *ar)
{
- const char *filename;
+ char filename[100];
int ret;
if (ar->fw_otp != NULL)
return 0;
- if (ar->hw.fw_otp == NULL) {
+ if (ar->hw.fw.otp == NULL) {
ath6kl_dbg(ATH6KL_DBG_BOOT,
"no OTP file configured for this hw\n");
return 0;
}
- filename = ar->hw.fw_otp;
+ snprintf(filename, sizeof(filename), "%s/%s",
+ ar->hw.fw.dir, ar->hw.fw.otp);
ret = ath6kl_get_fw(ar, filename, &ar->fw_otp,
&ar->fw_otp_len);
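With the firmware names grouped under hw.fw and a common hw.fw.dir, each fetch function now builds its path with the same snprintf("%s/%s", ...) call, as in the OTP case above. The repetition could equally be hidden behind one small helper; ath6kl_fw_path() below is a made-up name, offered only as a sketch of that alternative:

/* Hypothetical helper: compose "<fw dir>/<file>" into a caller buffer. */
static void ath6kl_fw_path(struct ath6kl *ar, const char *file,
			   char *buf, size_t buf_len)
{
	snprintf(buf, buf_len, "%s/%s", ar->hw.fw.dir, file);
}

A caller would then use, for example, ath6kl_fw_path(ar, ar->hw.fw.otp, filename, sizeof(filename)) before handing the path to ath6kl_get_fw().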
@@ -755,33 +710,61 @@ static int ath6kl_fetch_otp_file(struct ath6kl *ar)
return 0;
}
-static int ath6kl_fetch_fw_file(struct ath6kl *ar)
+static int ath6kl_fetch_testmode_file(struct ath6kl *ar)
{
- const char *filename;
+ char filename[100];
int ret;
- if (ar->fw != NULL)
+ if (ar->testmode == 0)
return 0;
- if (testmode) {
- if (ar->hw.fw_tcmd == NULL) {
- ath6kl_warn("testmode not supported\n");
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "testmode %d\n", ar->testmode);
+
+ if (ar->testmode == 2) {
+ if (ar->hw.fw.utf == NULL) {
+ ath6kl_warn("testmode 2 not supported\n");
return -EOPNOTSUPP;
}
- filename = ar->hw.fw_tcmd;
+ snprintf(filename, sizeof(filename), "%s/%s",
+ ar->hw.fw.dir, ar->hw.fw.utf);
+ } else {
+ if (ar->hw.fw.tcmd == NULL) {
+ ath6kl_warn("testmode 1 not supported\n");
+ return -EOPNOTSUPP;
+ }
- set_bit(TESTMODE, &ar->flag);
+ snprintf(filename, sizeof(filename), "%s/%s",
+ ar->hw.fw.dir, ar->hw.fw.tcmd);
+ }
+
+ set_bit(TESTMODE, &ar->flag);
- goto get_fw;
+ ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
+ if (ret) {
+ ath6kl_err("Failed to get testmode %d firmware file %s: %d\n",
+ ar->testmode, filename, ret);
+ return ret;
}
- if (WARN_ON(ar->hw.fw == NULL))
+ return 0;
+}
+
+static int ath6kl_fetch_fw_file(struct ath6kl *ar)
+{
+ char filename[100];
+ int ret;
+
+ if (ar->fw != NULL)
+ return 0;
+
+ /* FIXME: remove WARN_ON() as we won't support FW API 1 for long */
+ if (WARN_ON(ar->hw.fw.fw == NULL))
return -EINVAL;
- filename = ar->hw.fw;
+ snprintf(filename, sizeof(filename), "%s/%s",
+ ar->hw.fw.dir, ar->hw.fw.fw);
-get_fw:
ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
if (ret) {
ath6kl_err("Failed to get firmware file %s: %d\n",
@@ -794,16 +777,17 @@ get_fw:
static int ath6kl_fetch_patch_file(struct ath6kl *ar)
{
- const char *filename;
+ char filename[100];
int ret;
if (ar->fw_patch != NULL)
return 0;
- if (ar->hw.fw_patch == NULL)
+ if (ar->hw.fw.patch == NULL)
return 0;
- filename = ar->hw.fw_patch;
+ snprintf(filename, sizeof(filename), "%s/%s",
+ ar->hw.fw.dir, ar->hw.fw.patch);
ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
&ar->fw_patch_len);
@@ -816,6 +800,34 @@ static int ath6kl_fetch_patch_file(struct ath6kl *ar)
return 0;
}
+static int ath6kl_fetch_testscript_file(struct ath6kl *ar)
+{
+ char filename[100];
+ int ret;
+
+ if (ar->testmode != 2)
+ return 0;
+
+ if (ar->fw_testscript != NULL)
+ return 0;
+
+ if (ar->hw.fw.testscript == NULL)
+ return 0;
+
+ snprintf(filename, sizeof(filename), "%s/%s",
+ ar->hw.fw.dir, ar->hw.fw.testscript);
+
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_testscript,
+ &ar->fw_testscript_len);
+ if (ret) {
+ ath6kl_err("Failed to get testscript file %s: %d\n",
+ filename, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int ath6kl_fetch_fw_api1(struct ath6kl *ar)
{
int ret;
@@ -832,23 +844,24 @@ static int ath6kl_fetch_fw_api1(struct ath6kl *ar)
if (ret)
return ret;
+ ret = ath6kl_fetch_testscript_file(ar);
+ if (ret)
+ return ret;
+
return 0;
}
-static int ath6kl_fetch_fw_api2(struct ath6kl *ar)
+static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
{
size_t magic_len, len, ie_len;
const struct firmware *fw;
struct ath6kl_fw_ie *hdr;
- const char *filename;
+ char filename[100];
const u8 *data;
int ret, ie_id, i, index, bit;
__le32 *val;
- if (ar->hw.fw_api2 == NULL)
- return -EOPNOTSUPP;
-
- filename = ar->hw.fw_api2;
+ snprintf(filename, sizeof(filename), "%s/%s", ar->hw.fw.dir, name);
ret = request_firmware(&fw, filename, ar->dev);
if (ret)
@@ -892,7 +905,7 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar)
switch (ie_id) {
case ATH6KL_FW_IE_OTP_IMAGE:
ath6kl_dbg(ATH6KL_DBG_BOOT, "found otp image ie (%zd B)\n",
- ie_len);
+ ie_len);
ar->fw_otp = kmemdup(data, ie_len, GFP_KERNEL);
@@ -905,7 +918,11 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar)
break;
case ATH6KL_FW_IE_FW_IMAGE:
ath6kl_dbg(ATH6KL_DBG_BOOT, "found fw image ie (%zd B)\n",
- ie_len);
+ ie_len);
+
+ /* in testmode we already might have a fw file */
+ if (ar->fw != NULL)
+ break;
ar->fw = kmemdup(data, ie_len, GFP_KERNEL);
@@ -918,7 +935,7 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar)
break;
case ATH6KL_FW_IE_PATCH_IMAGE:
ath6kl_dbg(ATH6KL_DBG_BOOT, "found patch image ie (%zd B)\n",
- ie_len);
+ ie_len);
ar->fw_patch = kmemdup(data, ie_len, GFP_KERNEL);
@@ -1010,7 +1027,7 @@ out:
return ret;
}
-static int ath6kl_fetch_firmwares(struct ath6kl *ar)
+int ath6kl_init_fetch_firmwares(struct ath6kl *ar)
{
int ret;
@@ -1018,17 +1035,30 @@ static int ath6kl_fetch_firmwares(struct ath6kl *ar)
if (ret)
return ret;
- ret = ath6kl_fetch_fw_api2(ar);
+ ret = ath6kl_fetch_testmode_file(ar);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API3_FILE);
if (ret == 0) {
- ath6kl_dbg(ATH6KL_DBG_BOOT, "using fw api 2\n");
- return 0;
+ ar->fw_api = 3;
+ goto out;
+ }
+
+ ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API2_FILE);
+ if (ret == 0) {
+ ar->fw_api = 2;
+ goto out;
}
ret = ath6kl_fetch_fw_api1(ar);
if (ret)
return ret;
- ath6kl_dbg(ATH6KL_DBG_BOOT, "using fw api 1\n");
+ ar->fw_api = 1;
+
+out:
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "using fw api %d\n", ar->fw_api);
return 0;
}
@@ -1049,22 +1079,14 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
*/
if (ar->hw.board_addr != 0) {
board_address = ar->hw.board_addr;
- ath6kl_bmi_write(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_board_data)),
- (u8 *) &board_address, 4);
+ ath6kl_bmi_write_hi32(ar, hi_board_data,
+ board_address);
} else {
- ath6kl_bmi_read(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_board_data)),
- (u8 *) &board_address, 4);
+ ath6kl_bmi_read_hi32(ar, hi_board_data, &board_address);
}
/* determine where in target ram to write extended board data */
- ath6kl_bmi_read(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_board_ext_data)),
- (u8 *) &board_ext_address, 4);
+ ath6kl_bmi_read_hi32(ar, hi_board_ext_data, &board_ext_address);
if (ar->target_type == TARGET_TYPE_AR6003 &&
board_ext_address == 0) {
@@ -1076,6 +1098,8 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
case TARGET_TYPE_AR6003:
board_data_size = AR6003_BOARD_DATA_SZ;
board_ext_data_size = AR6003_BOARD_EXT_DATA_SZ;
+ if (ar->fw_board_len > (board_data_size + board_ext_data_size))
+ board_ext_data_size = AR6003_BOARD_EXT_DATA_SZ_V2;
break;
case TARGET_TYPE_AR6004:
board_data_size = AR6004_BOARD_DATA_SZ;
@@ -1107,10 +1131,7 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
/* record that extended board data is initialized */
param = (board_ext_data_size << 16) | 1;
- ath6kl_bmi_write(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_board_ext_data_config)),
- (unsigned char *) &param, 4);
+ ath6kl_bmi_write_hi32(ar, hi_board_ext_data_config, param);
}
if (ar->fw_board_len < board_data_size) {
@@ -1131,11 +1152,7 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
}
/* record the fact that Board Data IS initialized */
- param = 1;
- ath6kl_bmi_write(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_board_data_initialized)),
- (u8 *)&param, 4);
+ ath6kl_bmi_write_hi32(ar, hi_board_data_initialized, 1);
return ret;
}
@@ -1162,10 +1179,7 @@ static int ath6kl_upload_otp(struct ath6kl *ar)
}
/* read firmware start address */
- ret = ath6kl_bmi_read(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_app_start)),
- (u8 *) &address, sizeof(address));
+ ret = ath6kl_bmi_read_hi32(ar, hi_app_start, &address);
if (ret) {
ath6kl_err("Failed to read hi_app_start: %d\n", ret);
@@ -1223,7 +1237,7 @@ static int ath6kl_upload_firmware(struct ath6kl *ar)
static int ath6kl_upload_patch(struct ath6kl *ar)
{
- u32 address, param;
+ u32 address;
int ret;
if (ar->fw_patch == NULL)
@@ -1240,11 +1254,37 @@ static int ath6kl_upload_patch(struct ath6kl *ar)
return ret;
}
- param = address;
- ath6kl_bmi_write(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_dset_list_head)),
- (unsigned char *) &param, 4);
+ ath6kl_bmi_write_hi32(ar, hi_dset_list_head, address);
+
+ return 0;
+}
+
+static int ath6kl_upload_testscript(struct ath6kl *ar)
+{
+ u32 address;
+ int ret;
+
+ if (ar->testmode != 2)
+ return 0;
+
+ if (ar->fw_testscript == NULL)
+ return 0;
+
+ address = ar->hw.testscript_addr;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "writing testscript to 0x%x (%zd B)\n",
+ address, ar->fw_testscript_len);
+
+ ret = ath6kl_bmi_write(ar, address, ar->fw_testscript,
+ ar->fw_testscript_len);
+ if (ret) {
+ ath6kl_err("Failed to write testscript file: %d\n", ret);
+ return ret;
+ }
+
+ ath6kl_bmi_write_hi32(ar, hi_ota_testscript, address);
+ ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz, 4096);
+ ath6kl_bmi_write_hi32(ar, hi_test_apps_related, 1);
return 0;
}
@@ -1255,7 +1295,7 @@ static int ath6kl_init_upload(struct ath6kl *ar)
int status = 0;
if (ar->target_type != TARGET_TYPE_AR6003 &&
- ar->target_type != TARGET_TYPE_AR6004)
+ ar->target_type != TARGET_TYPE_AR6004)
return -EINVAL;
/* temporarily disable system sleep */
@@ -1312,7 +1352,8 @@ static int ath6kl_init_upload(struct ath6kl *ar)
return status;
/* WAR to avoid SDIO CRC err */
- if (ar->version.target_ver == AR6003_HW_2_0_VERSION) {
+ if (ar->version.target_ver == AR6003_HW_2_0_VERSION ||
+ ar->version.target_ver == AR6003_HW_2_1_1_VERSION) {
ath6kl_err("temporary war to avoid sdio crc error\n");
param = 0x20;
@@ -1357,6 +1398,11 @@ static int ath6kl_init_upload(struct ath6kl *ar)
if (status)
return status;
+ /* Download the test script */
+ status = ath6kl_upload_testscript(ar);
+ if (status)
+ return status;
+
/* Restore system sleep */
address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
status = ath6kl_bmi_reg_write(ar, address, sleep);
@@ -1372,9 +1418,9 @@ static int ath6kl_init_upload(struct ath6kl *ar)
return status;
}
-static int ath6kl_init_hw_params(struct ath6kl *ar)
+int ath6kl_init_hw_params(struct ath6kl *ar)
{
- const struct ath6kl_hw *hw;
+ const struct ath6kl_hw *uninitialized_var(hw);
int i;
for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
@@ -1481,10 +1527,11 @@ int ath6kl_init_hw_start(struct ath6kl *ar)
if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) {
- ath6kl_info("%s %s fw %s%s\n",
+ ath6kl_info("%s %s fw %s api %d%s\n",
ar->hw.name,
ath6kl_init_get_hif_name(ar->hif_type),
ar->wiphy->fw_version,
+ ar->fw_api,
test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
}
@@ -1549,173 +1596,7 @@ int ath6kl_init_hw_stop(struct ath6kl *ar)
return 0;
}
-int ath6kl_core_init(struct ath6kl *ar)
-{
- struct ath6kl_bmi_target_info targ_info;
- struct net_device *ndev;
- int ret = 0, i;
-
- ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
- if (!ar->ath6kl_wq)
- return -ENOMEM;
-
- ret = ath6kl_bmi_init(ar);
- if (ret)
- goto err_wq;
-
- /*
- * Turn on power to get hardware (target) version and leave power
- * on deliberately as we will boot the hardware anyway within few
- * seconds.
- */
- ret = ath6kl_hif_power_on(ar);
- if (ret)
- goto err_bmi_cleanup;
-
- ret = ath6kl_bmi_get_target_info(ar, &targ_info);
- if (ret)
- goto err_power_off;
-
- ar->version.target_ver = le32_to_cpu(targ_info.version);
- ar->target_type = le32_to_cpu(targ_info.type);
- ar->wiphy->hw_version = le32_to_cpu(targ_info.version);
-
- ret = ath6kl_init_hw_params(ar);
- if (ret)
- goto err_power_off;
-
- ar->htc_target = ath6kl_htc_create(ar);
-
- if (!ar->htc_target) {
- ret = -ENOMEM;
- goto err_power_off;
- }
-
- ret = ath6kl_fetch_firmwares(ar);
- if (ret)
- goto err_htc_cleanup;
-
- /* FIXME: we should free all firmwares in the error cases below */
-
- /* Indicate that WMI is enabled (although not ready yet) */
- set_bit(WMI_ENABLED, &ar->flag);
- ar->wmi = ath6kl_wmi_init(ar);
- if (!ar->wmi) {
- ath6kl_err("failed to initialize wmi\n");
- ret = -EIO;
- goto err_htc_cleanup;
- }
-
- ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
-
- ret = ath6kl_register_ieee80211_hw(ar);
- if (ret)
- goto err_node_cleanup;
-
- ret = ath6kl_debug_init(ar);
- if (ret) {
- wiphy_unregister(ar->wiphy);
- goto err_node_cleanup;
- }
-
- for (i = 0; i < ar->vif_max; i++)
- ar->avail_idx_map |= BIT(i);
-
- rtnl_lock();
-
- /* Add an initial station interface */
- ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
- INFRA_NETWORK);
-
- rtnl_unlock();
-
- if (!ndev) {
- ath6kl_err("Failed to instantiate a network device\n");
- ret = -ENOMEM;
- wiphy_unregister(ar->wiphy);
- goto err_debug_init;
- }
-
-
- ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
- __func__, ndev->name, ndev, ar);
-
- /* setup access class priority mappings */
- ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
- ar->ac_stream_pri_map[WMM_AC_BE] = 1;
- ar->ac_stream_pri_map[WMM_AC_VI] = 2;
- ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
-
- /* give our connected endpoints some buffers */
- ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
- ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
-
- /* allocate some buffers that handle larger AMSDU frames */
- ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
-
- ath6kl_cookie_init(ar);
-
- ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
- ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
-
- if (suspend_cutpower)
- ar->conf_flags |= ATH6KL_CONF_SUSPEND_CUTPOWER;
-
- ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
- WIPHY_FLAG_HAVE_AP_SME |
- WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
- WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
-
- if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
- ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
-
- ar->wiphy->probe_resp_offload =
- NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
- NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
- NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
- NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
-
- set_bit(FIRST_BOOT, &ar->flag);
-
- ret = ath6kl_init_hw_start(ar);
- if (ret) {
- ath6kl_err("Failed to start hardware: %d\n", ret);
- goto err_rxbuf_cleanup;
- }
-
- /*
- * Set mac address which is received in ready event
- * FIXME: Move to ath6kl_interface_add()
- */
- memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
-
- return ret;
-
-err_rxbuf_cleanup:
- ath6kl_htc_flush_rx_buf(ar->htc_target);
- ath6kl_cleanup_amsdu_rxbufs(ar);
- rtnl_lock();
- ath6kl_deinit_if_data(netdev_priv(ndev));
- rtnl_unlock();
- wiphy_unregister(ar->wiphy);
-err_debug_init:
- ath6kl_debug_cleanup(ar);
-err_node_cleanup:
- ath6kl_wmi_shutdown(ar->wmi);
- clear_bit(WMI_ENABLED, &ar->flag);
- ar->wmi = NULL;
-err_htc_cleanup:
- ath6kl_htc_cleanup(ar->htc_target);
-err_power_off:
- ath6kl_hif_power_off(ar);
-err_bmi_cleanup:
- ath6kl_bmi_cleanup(ar);
-err_wq:
- destroy_workqueue(ar->ath6kl_wq);
-
- return ret;
-}
-
+/* FIXME: move this to cfg80211.c and rename to ath6kl_cfg80211_vif_stop() */
void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
{
static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@@ -1747,6 +1628,7 @@ void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
void ath6kl_stop_txrx(struct ath6kl *ar)
{
struct ath6kl_vif *vif, *tmp_vif;
+ int i;
set_bit(DESTROY_IN_PROGRESS, &ar->flag);
@@ -1755,13 +1637,16 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
return;
}
+ for (i = 0; i < AP_MAX_NUM_STA; i++)
+ aggr_reset_state(ar->sta_list[i].aggr_conn);
+
spin_lock_bh(&ar->list_lock);
list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) {
list_del(&vif->list);
spin_unlock_bh(&ar->list_lock);
ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
rtnl_lock();
- ath6kl_deinit_if_data(vif);
+ ath6kl_cfg80211_vif_cleanup(vif);
rtnl_unlock();
spin_lock_bh(&ar->list_lock);
}
@@ -1791,8 +1676,11 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
* configure NOT to reset the target during a debug session.
*/
ath6kl_dbg(ATH6KL_DBG_TRC,
- "attempting to reset target on instance destroy\n");
+ "attempting to reset target on instance destroy\n");
ath6kl_reset_device(ar, ar->target_type, true, true);
clear_bit(WLAN_ENABLED, &ar->flag);
+
+ up(&ar->sem);
}
+EXPORT_SYMBOL(ath6kl_stop_txrx);
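The init.c hunks above collapse every open-coded access to a 32-bit host-interest item -- look up the item address with ath6kl_get_hi_item_addr()/HI_ITEM(), then move four bytes with ath6kl_bmi_write() or ath6kl_bmi_read() -- into single ath6kl_bmi_write_hi32()/ath6kl_bmi_read_hi32() calls. The helpers themselves are defined outside this excerpt; a minimal sketch of what the write wrapper could look like, assuming it is a macro (so HI_ITEM() can be applied to the bare item name) and mirroring what the removed lines did by hand, is:

/*
 * Sketch only, under the assumptions above -- the real helper lives
 * elsewhere in this series (bmi.h) and may differ in detail.
 */
#define ath6kl_bmi_write_hi32(ar, item, val)				\
({									\
	u32 addr, v = (val);						\
									\
	addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item));		\
	ath6kl_bmi_write(ar, addr, (u8 *) &v, sizeof(v));		\
})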
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index eea3c747653e..229e1922ebe4 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -52,9 +53,11 @@ struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid)
return conn;
}
-static void ath6kl_add_new_sta(struct ath6kl *ar, u8 *mac, u16 aid, u8 *wpaie,
- u8 ielen, u8 keymgmt, u8 ucipher, u8 auth)
+static void ath6kl_add_new_sta(struct ath6kl_vif *vif, u8 *mac, u16 aid,
+ u8 *wpaie, size_t ielen, u8 keymgmt,
+ u8 ucipher, u8 auth, u8 apsd_info)
{
+ struct ath6kl *ar = vif->ar;
struct ath6kl_sta *sta;
u8 free_slot;
@@ -68,18 +71,31 @@ static void ath6kl_add_new_sta(struct ath6kl *ar, u8 *mac, u16 aid, u8 *wpaie,
sta->keymgmt = keymgmt;
sta->ucipher = ucipher;
sta->auth = auth;
+ sta->apsd_info = apsd_info;
ar->sta_list_index = ar->sta_list_index | (1 << free_slot);
ar->ap_stats.sta[free_slot].aid = cpu_to_le32(aid);
+ aggr_conn_init(vif, vif->aggr_cntxt, sta->aggr_conn);
}
static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
{
struct ath6kl_sta *sta = &ar->sta_list[i];
+ struct ath6kl_mgmt_buff *entry, *tmp;
/* empty the queued pkts in the PS queue if any */
spin_lock_bh(&sta->psq_lock);
skb_queue_purge(&sta->psq);
+ skb_queue_purge(&sta->apsdq);
+
+ if (sta->mgmt_psq_len != 0) {
+ list_for_each_entry_safe(entry, tmp, &sta->mgmt_psq, list) {
+ kfree(entry);
+ }
+ INIT_LIST_HEAD(&sta->mgmt_psq);
+ sta->mgmt_psq_len = 0;
+ }
+
spin_unlock_bh(&sta->psq_lock);
memset(&ar->ap_stats.sta[sta->aid - 1], 0,
@@ -90,7 +106,7 @@ static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
sta->sta_flags = 0;
ar->sta_list_index = ar->sta_list_index & ~(1 << i);
-
+ aggr_reset_state(sta->aggr_conn);
}
static u8 ath6kl_remove_sta(struct ath6kl *ar, u8 *mac, u16 reason)
@@ -252,7 +268,7 @@ int ath6kl_read_fwlogs(struct ath6kl *ar)
struct ath6kl_dbglog_hdr debug_hdr;
struct ath6kl_dbglog_buf debug_buf;
u32 address, length, dropped, firstbuf, debug_hdr_addr;
- int ret = 0, loop;
+ int ret, loop;
u8 *buf;
buf = kmalloc(ATH6KL_FWLOG_PAYLOAD_SIZE, GFP_KERNEL);
@@ -334,7 +350,7 @@ void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
__le32 data;
if (target_type != TARGET_TYPE_AR6003 &&
- target_type != TARGET_TYPE_AR6004)
+ target_type != TARGET_TYPE_AR6004)
return;
data = cold_reset ? cpu_to_le32(RESET_CONTROL_COLD_RST) :
@@ -347,9 +363,6 @@ void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
case TARGET_TYPE_AR6004:
address = AR6004_RESET_CONTROL_ADDRESS;
break;
- default:
- address = AR6003_RESET_CONTROL_ADDRESS;
- break;
}
status = ath6kl_diag_write32(ar, address, data);
@@ -363,7 +376,7 @@ static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
u8 index;
u8 keyusage;
- for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
+ for (index = 0; index <= WMI_MAX_KEY_INDEX; index++) {
if (vif->wep_key_list[index].key_len) {
keyusage = GROUP_USAGE;
if (index == vif->def_txkey_index)
@@ -428,9 +441,8 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
u8 keymgmt, u8 ucipher, u8 auth,
- u8 assoc_req_len, u8 *assoc_info)
+ u8 assoc_req_len, u8 *assoc_info, u8 apsd_info)
{
- struct ath6kl *ar = vif->ar;
u8 *ies = NULL, *wpa_ie = NULL, *pos;
size_t ies_len = 0;
struct station_info sinfo;
@@ -484,9 +496,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
pos += 2 + pos[1];
}
- ath6kl_add_new_sta(ar, mac_addr, aid, wpa_ie,
+ ath6kl_add_new_sta(vif, mac_addr, aid, wpa_ie,
wpa_ie ? 2 + wpa_ie[1] : 0,
- keymgmt, ucipher, auth);
+ keymgmt, ucipher, auth, apsd_info);
/* send event to application */
memset(&sinfo, 0, sizeof(sinfo));
@@ -589,8 +601,7 @@ void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
if ((vif->nw_type == INFRA_NETWORK))
ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
- ar->listen_intvl_t,
- ar->listen_intvl_b);
+ vif->listen_intvl_t, 0);
netif_wake_queue(vif->ndev);
@@ -601,7 +612,7 @@ void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
netif_carrier_on(vif->ndev);
spin_unlock_bh(&vif->if_lock);
- aggr_reset_state(vif->aggr_cntxt);
+ aggr_reset_state(vif->aggr_cntxt->aggr_conn);
vif->reconnect_flag = 0;
if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
@@ -808,6 +819,7 @@ void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
struct sk_buff *skb;
bool psq_empty = false;
struct ath6kl *ar = vif->ar;
+ struct ath6kl_mgmt_buff *mgmt_buf;
conn = ath6kl_find_sta_by_aid(ar, aid);
@@ -818,7 +830,7 @@ void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
* becomes empty update the PVB for this station.
*/
spin_lock_bh(&conn->psq_lock);
- psq_empty = skb_queue_empty(&conn->psq);
+ psq_empty = skb_queue_empty(&conn->psq) && (conn->mgmt_psq_len == 0);
spin_unlock_bh(&conn->psq_lock);
if (psq_empty)
@@ -826,15 +838,31 @@ void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
return;
spin_lock_bh(&conn->psq_lock);
- skb = skb_dequeue(&conn->psq);
- spin_unlock_bh(&conn->psq_lock);
+ if (conn->mgmt_psq_len > 0) {
+ mgmt_buf = list_first_entry(&conn->mgmt_psq,
+ struct ath6kl_mgmt_buff, list);
+ list_del(&mgmt_buf->list);
+ conn->mgmt_psq_len--;
+ spin_unlock_bh(&conn->psq_lock);
+
+ conn->sta_flags |= STA_PS_POLLED;
+ ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx,
+ mgmt_buf->id, mgmt_buf->freq,
+ mgmt_buf->wait, mgmt_buf->buf,
+ mgmt_buf->len, mgmt_buf->no_cck);
+ conn->sta_flags &= ~STA_PS_POLLED;
+ kfree(mgmt_buf);
+ } else {
+ skb = skb_dequeue(&conn->psq);
+ spin_unlock_bh(&conn->psq_lock);
- conn->sta_flags |= STA_PS_POLLED;
- ath6kl_data_tx(skb, vif->ndev);
- conn->sta_flags &= ~STA_PS_POLLED;
+ conn->sta_flags |= STA_PS_POLLED;
+ ath6kl_data_tx(skb, vif->ndev);
+ conn->sta_flags &= ~STA_PS_POLLED;
+ }
spin_lock_bh(&conn->psq_lock);
- psq_empty = skb_queue_empty(&conn->psq);
+ psq_empty = skb_queue_empty(&conn->psq) && (conn->mgmt_psq_len == 0);
spin_unlock_bh(&conn->psq_lock);
if (psq_empty)
@@ -920,10 +948,10 @@ void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
}
ath6kl_cfg80211_disconnect_event(vif, reason, bssid,
- assoc_resp_len, assoc_info,
- prot_reason_status);
+ assoc_resp_len, assoc_info,
+ prot_reason_status);
- aggr_reset_state(vif->aggr_cntxt);
+ aggr_reset_state(vif->aggr_cntxt->aggr_conn);
del_timer(&vif->disconnect_timer);
@@ -941,9 +969,9 @@ void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
} else {
set_bit(CONNECT_PEND, &vif->flags);
if (((reason == ASSOC_FAILED) &&
- (prot_reason_status == 0x11)) ||
- ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0)
- && (vif->reconnect_flag == 1))) {
+ (prot_reason_status == 0x11)) ||
+ ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0) &&
+ (vif->reconnect_flag == 1))) {
set_bit(CONNECTED, &vif->flags);
return;
}
@@ -1020,11 +1048,155 @@ static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
return &vif->net_stats;
}
-static struct net_device_ops ath6kl_netdev_ops = {
+static int ath6kl_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ struct ath6kl *ar = vif->ar;
+ int err = 0;
+
+ if ((features & NETIF_F_RXCSUM) &&
+ (ar->rx_meta_ver != WMI_META_VERSION_2)) {
+ ar->rx_meta_ver = WMI_META_VERSION_2;
+ err = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
+ vif->fw_vif_idx,
+ ar->rx_meta_ver, 0, 0);
+ if (err) {
+ dev->features = features & ~NETIF_F_RXCSUM;
+ return err;
+ }
+ } else if (!(features & NETIF_F_RXCSUM) &&
+ (ar->rx_meta_ver == WMI_META_VERSION_2)) {
+ ar->rx_meta_ver = 0;
+ err = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
+ vif->fw_vif_idx,
+ ar->rx_meta_ver, 0, 0);
+ if (err) {
+ dev->features = features | NETIF_F_RXCSUM;
+ return err;
+ }
+
+ }
+
+ return err;
+}
+
+static void ath6kl_set_multicast_list(struct net_device *ndev)
+{
+ struct ath6kl_vif *vif = netdev_priv(ndev);
+ bool mc_all_on = false, mc_all_off = false;
+ int mc_count = netdev_mc_count(ndev);
+ struct netdev_hw_addr *ha;
+ bool found;
+ struct ath6kl_mc_filter *mc_filter, *tmp;
+ struct list_head mc_filter_new;
+ int ret;
+
+ if (!test_bit(WMI_READY, &vif->ar->flag) ||
+ !test_bit(WLAN_ENABLED, &vif->flags))
+ return;
+
+ mc_all_on = !!(ndev->flags & IFF_PROMISC) ||
+ !!(ndev->flags & IFF_ALLMULTI) ||
+ !!(mc_count > ATH6K_MAX_MC_FILTERS_PER_LIST);
+
+ mc_all_off = !(ndev->flags & IFF_MULTICAST) || mc_count == 0;
+
+ if (mc_all_on || mc_all_off) {
+ /* Enable/disable all multicast */
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s multicast filter\n",
+ mc_all_on ? "enabling" : "disabling");
+ ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, vif->fw_vif_idx,
+ mc_all_on);
+ if (ret)
+ ath6kl_warn("Failed to %s multicast receive\n",
+ mc_all_on ? "enable" : "disable");
+ return;
+ }
+
+ list_for_each_entry_safe(mc_filter, tmp, &vif->mc_filter, list) {
+ found = false;
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (memcmp(ha->addr, mc_filter->hw_addr,
+ ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /*
+ * Delete the filter which was previously set
+ * but not in the new request.
+ */
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "Removing %pM from multicast filter\n",
+ mc_filter->hw_addr);
+ ret = ath6kl_wmi_add_del_mcast_filter_cmd(vif->ar->wmi,
+ vif->fw_vif_idx, mc_filter->hw_addr,
+ false);
+ if (ret) {
+ ath6kl_warn("Failed to remove multicast filter:%pM\n",
+ mc_filter->hw_addr);
+ return;
+ }
+
+ list_del(&mc_filter->list);
+ kfree(mc_filter);
+ }
+ }
+
+ INIT_LIST_HEAD(&mc_filter_new);
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ found = false;
+ list_for_each_entry(mc_filter, &vif->mc_filter, list) {
+ if (memcmp(ha->addr, mc_filter->hw_addr,
+ ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ mc_filter = kzalloc(sizeof(struct ath6kl_mc_filter),
+ GFP_ATOMIC);
+ if (!mc_filter) {
+ WARN_ON(1);
+ goto out;
+ }
+
+ memcpy(mc_filter->hw_addr, ha->addr,
+ ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE);
+ /* Set the multicast filter */
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "Adding %pM to multicast filter list\n",
+ mc_filter->hw_addr);
+ ret = ath6kl_wmi_add_del_mcast_filter_cmd(vif->ar->wmi,
+ vif->fw_vif_idx, mc_filter->hw_addr,
+ true);
+ if (ret) {
+ ath6kl_warn("Failed to add multicast filter :%pM\n",
+ mc_filter->hw_addr);
+ kfree(mc_filter);
+ goto out;
+ }
+
+ list_add_tail(&mc_filter->list, &mc_filter_new);
+ }
+ }
+
+out:
+ list_splice_tail(&mc_filter_new, &vif->mc_filter);
+}
+
+static const struct net_device_ops ath6kl_netdev_ops = {
.ndo_open = ath6kl_open,
.ndo_stop = ath6kl_close,
.ndo_start_xmit = ath6kl_data_tx,
.ndo_get_stats = ath6kl_get_stats,
+ .ndo_set_features = ath6kl_set_features,
+ .ndo_set_rx_mode = ath6kl_set_multicast_list,
};
void init_netdev(struct net_device *dev)
@@ -1038,5 +1210,7 @@ void init_netdev(struct net_device *dev)
sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH
+ WMI_MAX_TX_META_SZ + ATH6KL_HTC_ALIGN_BYTES;
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+
return;
}
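ath6kl_set_multicast_list() above reconciles the driver's private filter list with whatever multicast addresses are currently configured on the netdev, in two passes: first remove filters that no longer appear in the device's list, then add filters for addresses that are not tracked yet, collecting the new entries on a temporary list that is spliced in at the end. The stand-alone sketch below shows only that two-pass reconcile pattern; the types and helpers are illustrative and not part of the driver API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ADDR_LEN 6

struct filter {
	unsigned char addr[ADDR_LEN];
	struct filter *next;
};

static int requested(const unsigned char (*req)[ADDR_LEN], int nreq,
		     const unsigned char *addr)
{
	int i;

	for (i = 0; i < nreq; i++)
		if (!memcmp(req[i], addr, ADDR_LEN))
			return 1;
	return 0;
}

static int tracked(const struct filter *list, const unsigned char *addr)
{
	for (; list; list = list->next)
		if (!memcmp(list->addr, addr, ADDR_LEN))
			return 1;
	return 0;
}

static void reconcile(struct filter **list,
		      const unsigned char (*req)[ADDR_LEN], int nreq)
{
	struct filter **pp, *f;
	int i;

	/* Pass 1: drop filters that are no longer requested
	 * (the driver tells the firmware to delete them here). */
	for (pp = list; (f = *pp) != NULL; ) {
		if (!requested(req, nreq, f->addr)) {
			*pp = f->next;
			free(f);
		} else {
			pp = &f->next;
		}
	}

	/* Pass 2: add requested addresses that have no filter yet
	 * (the driver programs the firmware filter here; the real code
	 * keeps the new entries on a temporary list and splices it). */
	for (i = 0; i < nreq; i++) {
		if (tracked(*list, req[i]))
			continue;
		f = malloc(sizeof(*f));
		if (!f)
			return;
		memcpy(f->addr, req[i], ADDR_LEN);
		f->next = *list;
		*list = f;
	}
}

int main(void)
{
	static const unsigned char req[][ADDR_LEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 },
	};
	struct filter *list = NULL, *f;

	reconcile(&list, req, 2);
	for (f = list; f; f = f->next)
		printf("filter on %02x:%02x:%02x:...\n",
		       f->addr[0], f->addr[1], f->addr[2]);
	return 0;
}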
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 9475e2d0d0b7..53528648b425 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -31,6 +32,7 @@
struct ath6kl_sdio {
struct sdio_func *func;
+ /* protects access to bus_req_freeq */
spinlock_t lock;
/* free list */
@@ -49,14 +51,20 @@ struct ath6kl_sdio {
/* scatter request list head */
struct list_head scat_req;
+ atomic_t irq_handling;
+ wait_queue_head_t irq_wq;
+
+ /* protects access to scat_req */
spinlock_t scat_lock;
+
bool scatter_enabled;
bool is_disabled;
- atomic_t irq_handling;
const struct sdio_device_id *id;
struct work_struct wr_async_work;
struct list_head wr_asyncq;
+
+ /* protects access to wr_asyncq */
spinlock_t wr_async_lock;
};
@@ -402,7 +410,10 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
return -ENOMEM;
mutex_lock(&ar_sdio->dma_buffer_mutex);
tbuf = ar_sdio->dma_buffer;
- memcpy(tbuf, buf, len);
+
+ if (request & HIF_WRITE)
+ memcpy(tbuf, buf, len);
+
bounced = true;
} else
tbuf = buf;
@@ -461,7 +472,6 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
ar_sdio = sdio_get_drvdata(func);
atomic_set(&ar_sdio->irq_handling, 1);
-
/*
* Release the host during interrupts so we can pick it back up when
* we process commands.
@@ -470,7 +480,10 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
sdio_claim_host(ar_sdio->func);
+
atomic_set(&ar_sdio->irq_handling, 0);
+ wake_up(&ar_sdio->irq_wq);
+
WARN_ON(status && status != -ECANCELED);
}
@@ -571,6 +584,13 @@ static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
sdio_release_host(ar_sdio->func);
}
+static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+
+ return !atomic_read(&ar_sdio->irq_handling);
+}
+
static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
@@ -578,10 +598,14 @@ static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
sdio_claim_host(ar_sdio->func);
- /* Mask our function IRQ */
- while (atomic_read(&ar_sdio->irq_handling)) {
+ if (atomic_read(&ar_sdio->irq_handling)) {
sdio_release_host(ar_sdio->func);
- schedule_timeout(HZ / 10);
+
+ ret = wait_event_interruptible(ar_sdio->irq_wq,
+ ath6kl_sdio_is_on_irq(ar));
+ if (ret)
+ return;
+
sdio_claim_host(ar_sdio->func);
}
@@ -603,6 +627,8 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
node = list_first_entry(&ar_sdio->scat_req,
struct hif_scatter_req, list);
list_del(&node->list);
+
+ node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req);
}
spin_unlock_bh(&ar_sdio->scat_lock);
@@ -635,8 +661,8 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
return -EINVAL;
ath6kl_dbg(ATH6KL_DBG_SCATTER,
- "hif-scatter: total len: %d scatter entries: %d\n",
- scat_req->len, scat_req->scat_entries);
+ "hif-scatter: total len: %d scatter entries: %d\n",
+ scat_req->len, scat_req->scat_entries);
if (request & HIF_SYNCHRONOUS)
status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
@@ -772,7 +798,6 @@ static int ath6kl_sdio_config(struct ath6kl *ar)
if (ret) {
ath6kl_err("Set sdio block size %d failed: %d)\n",
HIF_MBOX_BLOCK_SIZE, ret);
- sdio_release_host(func);
goto out;
}
@@ -782,7 +807,7 @@ out:
return ret;
}
-static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct sdio_func *func = ar_sdio->func;
@@ -793,60 +818,104 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);
- if (!(flags & MMC_PM_KEEP_POWER) ||
- (ar->conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER)) {
- /* as host doesn't support keep power we need to cut power */
- return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER,
- NULL);
- }
+ if (!(flags & MMC_PM_WAKE_SDIO_IRQ) ||
+ !(flags & MMC_PM_KEEP_POWER))
+ return -EINVAL;
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
if (ret) {
- printk(KERN_ERR "ath6kl: set sdio pm flags failed: %d\n",
- ret);
+ ath6kl_err("set sdio keep pwr flag failed: %d\n", ret);
return ret;
}
- if (!(flags & MMC_PM_WAKE_SDIO_IRQ))
- goto deepsleep;
-
/* sdio irq wakes up host */
+ ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+ if (ret)
+ ath6kl_err("set sdio wake irq flag failed: %d\n", ret);
+
+ return ret;
+}
+
+static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ mmc_pm_flag_t flags;
+ bool try_deepsleep = false;
+ int ret;
if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sched scan is in progress\n");
+
+ ret = ath6kl_set_sdio_pm_caps(ar);
+ if (ret)
+ goto cut_pwr;
+
ret = ath6kl_cfg80211_suspend(ar,
ATH6KL_CFG_SUSPEND_SCHED_SCAN,
NULL);
- if (ret) {
- ath6kl_warn("Schedule scan suspend failed: %d", ret);
- return ret;
- }
+ if (ret)
+ goto cut_pwr;
+
+ return 0;
+ }
+
+ if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
+ (!ar->suspend_mode && wow)) {
- ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+ ret = ath6kl_set_sdio_pm_caps(ar);
if (ret)
- ath6kl_warn("set sdio wake irq flag failed: %d\n", ret);
+ goto cut_pwr;
- return ret;
+ ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
+ if (ret && ret != -ENOTCONN)
+ ath6kl_err("wow suspend failed: %d\n", ret);
+
+ if (ret &&
+ (!ar->wow_suspend_mode ||
+ ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP))
+ try_deepsleep = true;
+ else if (ret &&
+ ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR)
+ goto cut_pwr;
+ if (!ret)
+ return 0;
}
- if (wow) {
+ if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
+ !ar->suspend_mode || try_deepsleep) {
+
+ flags = sdio_get_host_pm_caps(func);
+ if (!(flags & MMC_PM_KEEP_POWER))
+ goto cut_pwr;
+
+ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ if (ret)
+ goto cut_pwr;
+
/*
- * The host sdio controller is capable of keep power and
- * sdio irq wake up at this point. It's fine to continue
- * wow suspend operation.
+ * Workaround to support Deep Sleep with MSM, set the host pm
+ * flag as MMC_PM_WAKE_SDIO_IRQ to allow SDCC driver to disable
+ * the sdc2_clock and internally allows MSM to enter
+ * TCXO shutdown properly.
*/
- ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
- if (ret)
- return ret;
+ if ((flags & MMC_PM_WAKE_SDIO_IRQ)) {
+ ret = sdio_set_host_pm_flags(func,
+ MMC_PM_WAKE_SDIO_IRQ);
+ if (ret)
+ goto cut_pwr;
+ }
- ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+ ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP,
+ NULL);
if (ret)
- ath6kl_err("set sdio wake irq flag failed: %d\n", ret);
+ goto cut_pwr;
- return ret;
+ return 0;
}
-deepsleep:
- return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, NULL);
+cut_pwr:
+ return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL);
}
static int ath6kl_sdio_resume(struct ath6kl *ar)
@@ -869,8 +938,15 @@ static int ath6kl_sdio_resume(struct ath6kl *ar)
case ATH6KL_STATE_WOW:
break;
+
case ATH6KL_STATE_SCHED_SCAN:
break;
+
+ case ATH6KL_STATE_SUSPENDING:
+ break;
+
+ case ATH6KL_STATE_RESUMING:
+ break;
}
ath6kl_cfg80211_resume(ar);
@@ -949,7 +1025,7 @@ static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
(u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC);
if (status) {
ath6kl_err("%s: failed to read from window data addr\n",
- __func__);
+ __func__);
return status;
}
@@ -1260,10 +1336,12 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);
+ init_waitqueue_head(&ar_sdio->irq_wq);
+
for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);
- ar = ath6kl_core_alloc(&ar_sdio->func->dev);
+ ar = ath6kl_core_create(&ar_sdio->func->dev);
if (!ar) {
ath6kl_err("Failed to alloc ath6kl core\n");
ret = -ENOMEM;
@@ -1293,7 +1371,7 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
return ret;
err_core_alloc:
- ath6kl_core_free(ar_sdio->ar);
+ ath6kl_core_destroy(ar_sdio->ar);
err_dma:
kfree(ar_sdio->dma_buffer);
err_hif:
@@ -1316,6 +1394,7 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
cancel_work_sync(&ar_sdio->wr_async_work);
ath6kl_core_cleanup(ar_sdio->ar);
+ ath6kl_core_destroy(ar_sdio->ar);
kfree(ar_sdio->dma_buffer);
kfree(ar_sdio);
@@ -1332,7 +1411,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = {
MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
static struct sdio_driver ath6kl_sdio_driver = {
- .name = "ath6kl",
+ .name = "ath6kl_sdio",
.id_table = ath6kl_sdio_devices,
.probe = ath6kl_sdio_probe,
.remove = ath6kl_sdio_remove,
@@ -1362,19 +1441,19 @@ MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(AR6003_HW_2_0_OTP_FILE);
-MODULE_FIRMWARE(AR6003_HW_2_0_FIRMWARE_FILE);
-MODULE_FIRMWARE(AR6003_HW_2_0_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_OTP_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE);
-MODULE_FIRMWARE(AR6003_HW_2_1_1_OTP_FILE);
-MODULE_FIRMWARE(AR6003_HW_2_1_1_FIRMWARE_FILE);
-MODULE_FIRMWARE(AR6003_HW_2_1_1_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_OTP_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE);
-MODULE_FIRMWARE(AR6004_HW_1_0_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_0_FW_DIR "/" AR6004_HW_1_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
-MODULE_FIRMWARE(AR6004_HW_1_1_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
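Two sdio.c changes above are worth reading together: the interrupt handler now wakes the new irq_wq waitqueue after clearing irq_handling, and ath6kl_sdio_irq_disable() sleeps on that waitqueue instead of polling with schedule_timeout(HZ / 10). Stripped of the sdio host claim/release that the real code interleaves, the handshake is the usual flag-plus-waitqueue pattern; this is a condensed restatement, not the driver code verbatim:

/* interrupt side */
atomic_set(&ar_sdio->irq_handling, 1);
/* ... run the bottom-half handler ... */
atomic_set(&ar_sdio->irq_handling, 0);
wake_up(&ar_sdio->irq_wq);

/* irq-disable side */
if (atomic_read(&ar_sdio->irq_handling)) {
	ret = wait_event_interruptible(ar_sdio->irq_wq,
				       !atomic_read(&ar_sdio->irq_handling));
	if (ret)
		return;		/* interrupted by a signal */
}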
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index 108a723a1085..78e0ef4567a5 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2010 Atheros Communications Inc.
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -19,6 +20,7 @@
#define AR6003_BOARD_DATA_SZ 1024
#define AR6003_BOARD_EXT_DATA_SZ 768
+#define AR6003_BOARD_EXT_DATA_SZ_V2 1024
#define AR6004_BOARD_DATA_SZ 6144
#define AR6004_BOARD_EXT_DATA_SZ 0
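The new AR6003_BOARD_EXT_DATA_SZ_V2 define is consumed by the ath6kl_upload_board_file() hunk earlier in this patch: an AR6003 board file larger than the legacy 1024 + 768 = 1792 bytes is taken to carry the 1024-byte extended data region instead. A stand-alone check of that selection (the 2048-byte length is an assumed example, not a value from the patch):

#include <stdio.h>

#define AR6003_BOARD_DATA_SZ		1024
#define AR6003_BOARD_EXT_DATA_SZ	768
#define AR6003_BOARD_EXT_DATA_SZ_V2	1024

int main(void)
{
	unsigned int fw_board_len = 2048;	/* assumed example length */
	unsigned int ext = AR6003_BOARD_EXT_DATA_SZ;

	if (fw_board_len > AR6003_BOARD_DATA_SZ + AR6003_BOARD_EXT_DATA_SZ)
		ext = AR6003_BOARD_EXT_DATA_SZ_V2;	/* 2048 > 1792 */

	printf("extended board data size: %u\n", ext);	/* prints 1024 */
	return 0;
}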
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c
index 381eb66a605f..6675c92b542b 100644
--- a/drivers/net/wireless/ath/ath6kl/testmode.c
+++ b/drivers/net/wireless/ath/ath6kl/testmode.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -15,6 +16,7 @@
*/
#include "testmode.h"
+#include "debug.h"
#include <net/netlink.h>
@@ -30,7 +32,7 @@ enum ath6kl_tm_attr {
enum ath6kl_tm_cmd {
ATH6KL_TM_CMD_TCMD = 0,
- ATH6KL_TM_CMD_RX_REPORT = 1,
+ ATH6KL_TM_CMD_RX_REPORT = 1, /* not used anymore */
};
#define ATH6KL_TM_DATA_MAX_LEN 5000
@@ -41,84 +43,33 @@ static const struct nla_policy ath6kl_tm_policy[ATH6KL_TM_ATTR_MAX + 1] = {
.len = ATH6KL_TM_DATA_MAX_LEN },
};
-void ath6kl_tm_rx_report_event(struct ath6kl *ar, void *buf, size_t buf_len)
+void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len)
{
- if (down_interruptible(&ar->sem))
- return;
-
- kfree(ar->tm.rx_report);
-
- ar->tm.rx_report = kmemdup(buf, buf_len, GFP_KERNEL);
- ar->tm.rx_report_len = buf_len;
-
- up(&ar->sem);
-
- wake_up(&ar->event_wq);
-}
-
-static int ath6kl_tm_rx_report(struct ath6kl *ar, void *buf, size_t buf_len,
- struct sk_buff *skb)
-{
- int ret = 0;
- long left;
-
- if (down_interruptible(&ar->sem))
- return -ERESTARTSYS;
-
- if (!test_bit(WMI_READY, &ar->flag)) {
- ret = -EIO;
- goto out;
- }
-
- if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
- ret = -EBUSY;
- goto out;
- }
-
- if (ath6kl_wmi_test_cmd(ar->wmi, buf, buf_len) < 0) {
- up(&ar->sem);
- return -EIO;
- }
-
- left = wait_event_interruptible_timeout(ar->event_wq,
- ar->tm.rx_report != NULL,
- WMI_TIMEOUT);
+ struct sk_buff *skb;
- if (left == 0) {
- ret = -ETIMEDOUT;
- goto out;
- } else if (left < 0) {
- ret = left;
- goto out;
- }
+ if (!buf || buf_len == 0)
+ return;
- if (ar->tm.rx_report == NULL || ar->tm.rx_report_len == 0) {
- ret = -EINVAL;
- goto out;
+ skb = cfg80211_testmode_alloc_event_skb(ar->wiphy, buf_len, GFP_KERNEL);
+ if (!skb) {
+ ath6kl_warn("failed to allocate testmode rx skb!\n");
+ return;
}
-
- NLA_PUT(skb, ATH6KL_TM_ATTR_DATA, ar->tm.rx_report_len,
- ar->tm.rx_report);
-
- kfree(ar->tm.rx_report);
- ar->tm.rx_report = NULL;
-
-out:
- up(&ar->sem);
-
- return ret;
+ NLA_PUT_U32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD);
+ NLA_PUT(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf);
+ cfg80211_testmode_event(skb, GFP_KERNEL);
+ return;
nla_put_failure:
- ret = -ENOBUFS;
- goto out;
+ kfree_skb(skb);
+ ath6kl_warn("nla_put failed on testmode rx skb!\n");
}
int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len)
{
struct ath6kl *ar = wiphy_priv(wiphy);
struct nlattr *tb[ATH6KL_TM_ATTR_MAX + 1];
- int err, buf_len, reply_len;
- struct sk_buff *skb;
+ int err, buf_len;
void *buf;
err = nla_parse(tb, ATH6KL_TM_ATTR_MAX, data, len,
@@ -143,24 +94,6 @@ int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len)
break;
case ATH6KL_TM_CMD_RX_REPORT:
- if (!tb[ATH6KL_TM_ATTR_DATA])
- return -EINVAL;
-
- buf = nla_data(tb[ATH6KL_TM_ATTR_DATA]);
- buf_len = nla_len(tb[ATH6KL_TM_ATTR_DATA]);
-
- reply_len = nla_total_size(ATH6KL_TM_DATA_MAX_LEN);
- skb = cfg80211_testmode_alloc_reply_skb(wiphy, reply_len);
- if (!skb)
- return -ENOMEM;
-
- err = ath6kl_tm_rx_report(ar, buf, buf_len, skb);
- if (err < 0) {
- kfree_skb(skb);
- return err;
- }
-
- return cfg80211_testmode_reply(skb);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.h b/drivers/net/wireless/ath/ath6kl/testmode.h
index 43dffcc11fb1..fe651d6707df 100644
--- a/drivers/net/wireless/ath/ath6kl/testmode.h
+++ b/drivers/net/wireless/ath/ath6kl/testmode.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -18,13 +19,13 @@
#ifdef CONFIG_NL80211_TESTMODE
-void ath6kl_tm_rx_report_event(struct ath6kl *ar, void *buf, size_t buf_len);
+void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len);
int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len);
#else
-static inline void ath6kl_tm_rx_report_event(struct ath6kl *ar, void *buf,
- size_t buf_len)
+static inline void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf,
+ size_t buf_len)
{
}
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 506a3031a885..f85353fd1792 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -17,6 +18,23 @@
#include "core.h"
#include "debug.h"
+/*
+ * tid - tid_mux0..tid_mux3
+ * aid - tid_mux4..tid_mux7
+ */
+#define ATH6KL_TID_MASK 0xf
+#define ATH6KL_AID_SHIFT 4
+
+static inline u8 ath6kl_get_tid(u8 tid_mux)
+{
+ return tid_mux & ATH6KL_TID_MASK;
+}
+
+static inline u8 ath6kl_get_aid(u8 tid_mux)
+{
+ return tid_mux >> ATH6KL_AID_SHIFT;
+}
+
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
u32 *map_no)
{
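The helpers added at the top of the txrx.c hunk above multiplex a traffic identifier and an association ID into one byte: the TID occupies the low four bits (tid_mux0..tid_mux3) and the AID the high four (tid_mux4..tid_mux7). A quick stand-alone round trip of that encoding (the values are arbitrary examples):

#include <assert.h>
#include <stdio.h>

#define ATH6KL_TID_MASK		0xf
#define ATH6KL_AID_SHIFT	4

static unsigned char get_tid(unsigned char tid_mux)
{
	return tid_mux & ATH6KL_TID_MASK;
}

static unsigned char get_aid(unsigned char tid_mux)
{
	return tid_mux >> ATH6KL_AID_SHIFT;
}

int main(void)
{
	unsigned char mux = (2 << ATH6KL_AID_SHIFT) | 5;	/* aid 2, tid 5 */

	assert(get_tid(mux) == 5);
	assert(get_aid(mux) == 2);
	printf("mux 0x%02x -> tid %u, aid %u\n", mux, get_tid(mux), get_aid(mux));
	return 0;
}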
@@ -77,12 +95,118 @@ static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
return ar->node_map[ep_map].ep_id;
}
+static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
+ struct ath6kl_vif *vif,
+ struct sk_buff *skb,
+ u32 *flags)
+{
+ struct ath6kl *ar = vif->ar;
+ bool is_apsdq_empty = false;
+ struct ethhdr *datap = (struct ethhdr *) skb->data;
+ u8 up = 0, traffic_class, *ip_hdr;
+ u16 ether_type;
+ struct ath6kl_llc_snap_hdr *llc_hdr;
+
+ if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
+ /*
+ * This tx is because of a uAPSD trigger, determine
+ * more and EOSP bit. Set EOSP if queue is empty
+ * or sufficient frames are delivered for this trigger.
+ */
+ spin_lock_bh(&conn->psq_lock);
+ if (!skb_queue_empty(&conn->apsdq))
+ *flags |= WMI_DATA_HDR_FLAGS_MORE;
+ else if (conn->sta_flags & STA_PS_APSD_EOSP)
+ *flags |= WMI_DATA_HDR_FLAGS_EOSP;
+ *flags |= WMI_DATA_HDR_FLAGS_UAPSD;
+ spin_unlock_bh(&conn->psq_lock);
+ return false;
+ } else if (!conn->apsd_info)
+ return false;
+
+ if (test_bit(WMM_ENABLED, &vif->flags)) {
+ ether_type = be16_to_cpu(datap->h_proto);
+ if (is_ethertype(ether_type)) {
+ /* packet is in DIX format */
+ ip_hdr = (u8 *)(datap + 1);
+ } else {
+ /* packet is in 802.3 format */
+ llc_hdr = (struct ath6kl_llc_snap_hdr *)
+ (datap + 1);
+ ether_type = be16_to_cpu(llc_hdr->eth_type);
+ ip_hdr = (u8 *)(llc_hdr + 1);
+ }
+
+ if (ether_type == IP_ETHERTYPE)
+ up = ath6kl_wmi_determine_user_priority(
+ ip_hdr, 0);
+ }
+
+ traffic_class = ath6kl_wmi_get_traffic_class(up);
+
+ if ((conn->apsd_info & (1 << traffic_class)) == 0)
+ return false;
+
+ /* Queue the frames if the STA is sleeping */
+ spin_lock_bh(&conn->psq_lock);
+ is_apsdq_empty = skb_queue_empty(&conn->apsdq);
+ skb_queue_tail(&conn->apsdq, skb);
+ spin_unlock_bh(&conn->psq_lock);
+
+ /*
+ * If this is the first pkt getting queued
+ * for this STA, update the PVB for this STA
+ */
+ if (is_apsdq_empty) {
+ ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
+ vif->fw_vif_idx,
+ conn->aid, 1, 0);
+ }
+ *flags |= WMI_DATA_HDR_FLAGS_UAPSD;
+
+ return true;
+}
+
+static bool ath6kl_process_psq(struct ath6kl_sta *conn,
+ struct ath6kl_vif *vif,
+ struct sk_buff *skb,
+ u32 *flags)
+{
+ bool is_psq_empty = false;
+ struct ath6kl *ar = vif->ar;
+
+ if (conn->sta_flags & STA_PS_POLLED) {
+ spin_lock_bh(&conn->psq_lock);
+ if (!skb_queue_empty(&conn->psq))
+ *flags |= WMI_DATA_HDR_FLAGS_MORE;
+ spin_unlock_bh(&conn->psq_lock);
+ return false;
+ }
+
+ /* Queue the frames if the STA is sleeping */
+ spin_lock_bh(&conn->psq_lock);
+ is_psq_empty = skb_queue_empty(&conn->psq);
+ skb_queue_tail(&conn->psq, skb);
+ spin_unlock_bh(&conn->psq_lock);
+
+ /*
+ * If this is the first pkt getting queued
+ * for this STA, update the PVB for this
+ * STA.
+ */
+ if (is_psq_empty)
+ ath6kl_wmi_set_pvb_cmd(ar->wmi,
+ vif->fw_vif_idx,
+ conn->aid, 1);
+ return true;
+}
+
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
- bool *more_data)
+ u32 *flags)
{
struct ethhdr *datap = (struct ethhdr *) skb->data;
struct ath6kl_sta *conn = NULL;
- bool ps_queued = false, is_psq_empty = false;
+ bool ps_queued = false;
struct ath6kl *ar = vif->ar;
if (is_multicast_ether_addr(datap->h_dest)) {
@@ -128,7 +252,7 @@ static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
*/
spin_lock_bh(&ar->mcastpsq_lock);
if (!skb_queue_empty(&ar->mcastpsq))
- *more_data = true;
+ *flags |= WMI_DATA_HDR_FLAGS_MORE;
spin_unlock_bh(&ar->mcastpsq_lock);
}
}
@@ -142,37 +266,13 @@ static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
}
if (conn->sta_flags & STA_PS_SLEEP) {
- if (!(conn->sta_flags & STA_PS_POLLED)) {
- /* Queue the frames if the STA is sleeping */
- spin_lock_bh(&conn->psq_lock);
- is_psq_empty = skb_queue_empty(&conn->psq);
- skb_queue_tail(&conn->psq, skb);
- spin_unlock_bh(&conn->psq_lock);
-
- /*
- * If this is the first pkt getting queued
- * for this STA, update the PVB for this
- * STA.
- */
- if (is_psq_empty)
- ath6kl_wmi_set_pvb_cmd(ar->wmi,
- vif->fw_vif_idx,
- conn->aid, 1);
-
- ps_queued = true;
- } else {
- /*
- * This tx is because of a PsPoll.
- * Determine if MoreData bit has to be set.
- */
- spin_lock_bh(&conn->psq_lock);
- if (!skb_queue_empty(&conn->psq))
- *more_data = true;
- spin_unlock_bh(&conn->psq_lock);
- }
+ ps_queued = ath6kl_process_uapsdq(conn,
+ vif, skb, flags);
+ if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
+ ps_queued = ath6kl_process_psq(conn,
+ vif, skb, flags);
}
}
-
return ps_queued;
}
@@ -185,6 +285,9 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
int status = 0;
struct ath6kl_cookie *cookie = NULL;
+ if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW))
+ return -EACCES;
+
spin_lock_bh(&ar->lock);
ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
@@ -242,8 +345,13 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
u32 map_no = 0;
u16 htc_tag = ATH6KL_DATA_PKT_TAG;
u8 ac = 99 ; /* initialize to unmapped ac */
- bool chk_adhoc_ps_mapping = false, more_data = false;
+ bool chk_adhoc_ps_mapping = false;
int ret;
+ struct wmi_tx_meta_v2 meta_v2;
+ void *meta;
+ u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
+ u8 meta_ver = 0;
+ u32 flags = 0;
ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
"%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
@@ -255,16 +363,29 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
return 0;
}
+ if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON)) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
if (!test_bit(WMI_READY, &ar->flag))
goto fail_tx;
/* AP mode Power saving processing */
if (vif->nw_type == AP_NETWORK) {
- if (ath6kl_powersave_ap(vif, skb, &more_data))
+ if (ath6kl_powersave_ap(vif, skb, &flags))
return 0;
}
if (test_bit(WMI_ENABLED, &ar->flag)) {
+ if ((dev->features & NETIF_F_IP_CSUM) &&
+ (csum == CHECKSUM_PARTIAL)) {
+ csum_start = skb->csum_start -
+ (skb_network_header(skb) - skb->head) +
+ sizeof(struct ath6kl_llc_snap_hdr);
+ csum_dest = skb->csum_offset + csum_start;
+ }
+
if (skb_headroom(skb) < dev->needed_headroom) {
struct sk_buff *tmp_skb = skb;
@@ -281,15 +402,33 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
goto fail_tx;
}
- if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
- more_data, 0, 0, NULL,
- vif->fw_vif_idx)) {
- ath6kl_err("wmi_data_hdr_add failed\n");
+ if ((dev->features & NETIF_F_IP_CSUM) &&
+ (csum == CHECKSUM_PARTIAL)) {
+ meta_v2.csum_start = csum_start;
+ meta_v2.csum_dest = csum_dest;
+
+ /* instruct target to calculate checksum */
+ meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
+ meta_ver = WMI_META_VERSION_2;
+ meta = &meta_v2;
+ } else {
+ meta_ver = 0;
+ meta = NULL;
+ }
+
+ ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
+ DATA_MSGTYPE, flags, 0,
+ meta_ver,
+ meta, vif->fw_vif_idx);
+
+ if (ret) {
+ ath6kl_warn("failed to add wmi data header:%d\n"
+ , ret);
goto fail_tx;
}
if ((vif->nw_type == ADHOC_NETWORK) &&
- ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
+ ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
chk_adhoc_ps_mapping = true;
else {
/* get the stream mapping */
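The checksum-offload hunks above rebase skb->csum_start, which the stack expresses as an offset from skb->head, by subtracting the network-header offset and adding sizeof(struct ath6kl_llc_snap_hdr), so the offsets handed to the firmware in wmi_tx_meta_v2 presumably count from the LLC/SNAP header the driver prepends. A worked example for a plain TCP over IPv4 frame follows; every number below is an illustrative assumption, not taken from the patch.

#include <stdio.h>

int main(void)
{
	unsigned int llc_snap_hdr = 8;	/* DSAP, SSAP, CTRL, 3-byte OUI, ethertype */
	unsigned int ip_hdr_len = 20;	/* IPv4 header without options */
	unsigned int tcp_csum_off = 16;	/* checksum field offset inside TCP */

	/* skb->csum_start minus the network-header offset is the distance
	 * from the IP header to where checksumming starts (the TCP header). */
	unsigned int csum_start = ip_hdr_len + llc_snap_hdr;
	unsigned int csum_dest = csum_start + tcp_csum_off;

	printf("csum_start=%u csum_dest=%u\n", csum_start, csum_dest);	/* 28, 44 */
	return 0;
}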
@@ -449,9 +588,7 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
* WMI queue with too many commands the only exception to
* this is during testing using endpointping.
*/
- spin_lock_bh(&ar->lock);
set_bit(WMI_CTRL_EP_FULL, &ar->flag);
- spin_unlock_bh(&ar->lock);
ath6kl_err("wmi ctrl ep is full\n");
return action;
}
@@ -465,7 +602,8 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
*/
if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
ar->hiac_stream_active_pri &&
- ar->cookie_count <= MAX_HI_COOKIE_NUM)
+ ar->cookie_count <=
+ target->endpoint[endpoint].tx_drop_packet_threshold)
/*
* Give preference to the highest priority stream by
* dropping the packets which overflowed.
@@ -479,9 +617,7 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
action != HTC_SEND_FULL_DROP) {
spin_unlock_bh(&ar->list_lock);
- spin_lock_bh(&vif->if_lock);
set_bit(NETQ_STOPPED, &vif->flags);
- spin_unlock_bh(&vif->if_lock);
netif_stop_queue(vif->ndev);
return action;
@@ -710,10 +846,12 @@ static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
struct sk_buff *skb = NULL;
- if (skb_queue_len(&p_aggr->free_q) < (AGGR_NUM_OF_FREE_NETBUFS >> 2))
- ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
+ if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
+ (AGGR_NUM_OF_FREE_NETBUFS >> 2))
+ ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
+ AGGR_NUM_OF_FREE_NETBUFS);
- skb = skb_dequeue(&p_aggr->free_q);
+ skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);
return skb;
}
@@ -748,7 +886,7 @@ void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
if (!IS_ALIGNED((unsigned long) skb->data, 4))
skb->data = PTR_ALIGN(skb->data - 4, 4);
set_htc_rxpkt_info(packet, skb, skb->data,
- ATH6KL_BUFFER_SIZE, endpoint);
+ ATH6KL_BUFFER_SIZE, endpoint);
list_add_tail(&packet->list, &queue);
}
@@ -881,7 +1019,7 @@ static void aggr_slice_amsdu(struct aggr_info *p_aggr,
dev_kfree_skb(skb);
}
-static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
+static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
u16 seq_no, u8 order)
{
struct sk_buff *skb;
@@ -890,11 +1028,8 @@ static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
u16 idx, idx_end, seq_end;
struct rxtid_stats *stats;
- if (!p_aggr)
- return;
-
- rxtid = &p_aggr->rx_tid[tid];
- stats = &p_aggr->stat[tid];
+ rxtid = &agg_conn->rx_tid[tid];
+ stats = &agg_conn->stat[tid];
idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
@@ -923,7 +1058,8 @@ static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
if (node->skb) {
if (node->is_amsdu)
- aggr_slice_amsdu(p_aggr, rxtid, node->skb);
+ aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
+ node->skb);
else
skb_queue_tail(&rxtid->q, node->skb);
node->skb = NULL;
@@ -939,10 +1075,10 @@ static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
stats->num_delivered += skb_queue_len(&rxtid->q);
while ((skb = skb_dequeue(&rxtid->q)))
- ath6kl_deliver_frames_to_nw_stack(p_aggr->dev, skb);
+ ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}
-static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
+static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
u16 seq_no,
bool is_amsdu, struct sk_buff *frame)
{
@@ -954,18 +1090,18 @@ static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
bool is_queued = false;
u16 extended_end;
- rxtid = &agg_info->rx_tid[tid];
- stats = &agg_info->stat[tid];
+ rxtid = &agg_conn->rx_tid[tid];
+ stats = &agg_conn->stat[tid];
stats->num_into_aggr++;
if (!rxtid->aggr) {
if (is_amsdu) {
- aggr_slice_amsdu(agg_info, rxtid, frame);
+ aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
is_queued = true;
stats->num_amsdu++;
while ((skb = skb_dequeue(&rxtid->q)))
- ath6kl_deliver_frames_to_nw_stack(agg_info->dev,
+ ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
skb);
}
return is_queued;
@@ -985,7 +1121,7 @@ static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
(cur < end || cur > extended_end)) ||
((end > extended_end) && (cur > extended_end) &&
(cur < end))) {
- aggr_deque_frms(agg_info, tid, 0, 0);
+ aggr_deque_frms(agg_conn, tid, 0, 0);
if (cur >= rxtid->hold_q_sz - 1)
rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
else
@@ -1002,7 +1138,7 @@ static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
st = ATH6KL_MAX_SEQ_NO -
(rxtid->hold_q_sz - 2 - cur);
- aggr_deque_frms(agg_info, tid, st, 0);
+ aggr_deque_frms(agg_conn, tid, st, 0);
}
stats->num_oow++;
@@ -1041,9 +1177,9 @@ static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
spin_unlock_bh(&rxtid->lock);
- aggr_deque_frms(agg_info, tid, 0, 1);
+ aggr_deque_frms(agg_conn, tid, 0, 1);
- if (agg_info->timer_scheduled)
+ if (agg_conn->timer_scheduled)
rxtid->progress = true;
else
for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
@@ -1054,8 +1190,8 @@ static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
* the frame doesn't remain stuck
* forever.
*/
- agg_info->timer_scheduled = true;
- mod_timer(&agg_info->timer,
+ agg_conn->timer_scheduled = true;
+ mod_timer(&agg_conn->timer,
(jiffies +
HZ * (AGGR_RX_TIMEOUT) / 1000));
rxtid->progress = false;
@@ -1067,6 +1203,76 @@ static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
return is_queued;
}
+static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
+ struct ath6kl_sta *conn)
+{
+ struct ath6kl *ar = vif->ar;
+ bool is_apsdq_empty, is_apsdq_empty_at_start;
+ u32 num_frames_to_deliver, flags;
+ struct sk_buff *skb = NULL;
+
+ /*
+ * If the APSD q for this STA is not empty, dequeue and
+ * send a pkt from the head of the q. Also update the
+ * More data bit in the WMI_DATA_HDR if there are
+ * more pkts for this STA in the APSD q.
+ * If there are no more pkts for this STA,
+ * update the APSD bitmap for this STA.
+ */
+
+ num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
+ ATH6KL_APSD_FRAME_MASK;
+ /*
+ * Number of frames to send in a service period is
+ * indicated by the station
+ * in the QOS_INFO of the association request
+ * If it is zero, send all frames
+ */
+ if (!num_frames_to_deliver)
+ num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;
+
+ spin_lock_bh(&conn->psq_lock);
+ is_apsdq_empty = skb_queue_empty(&conn->apsdq);
+ spin_unlock_bh(&conn->psq_lock);
+ is_apsdq_empty_at_start = is_apsdq_empty;
+
+ while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
+
+ spin_lock_bh(&conn->psq_lock);
+ skb = skb_dequeue(&conn->apsdq);
+ is_apsdq_empty = skb_queue_empty(&conn->apsdq);
+ spin_unlock_bh(&conn->psq_lock);
+
+ /*
+ * Set the STA flag to Trigger delivery,
+ * so that the frame will go out
+ */
+ conn->sta_flags |= STA_PS_APSD_TRIGGER;
+ num_frames_to_deliver--;
+
+ /* Last frame in the service period, set EOSP or queue empty */
+ if ((is_apsdq_empty) || (!num_frames_to_deliver))
+ conn->sta_flags |= STA_PS_APSD_EOSP;
+
+ ath6kl_data_tx(skb, vif->ndev);
+ conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
+ conn->sta_flags &= ~(STA_PS_APSD_EOSP);
+ }
+
+ if (is_apsdq_empty) {
+ if (is_apsdq_empty_at_start)
+ flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
+ else
+ flags = 0;
+
+ ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
+ vif->fw_vif_idx,
+ conn->aid, 0, flags);
+ }
+
+ return;
+}
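For context on the num_frames_to_deliver computation above: WMM encodes the per-service-period limit in the QoS Info "Max SP Length" code the station supplies at association, where 0 means all buffered frames and the non-zero codes mean 2, 4 or 6 frames. A minimal sketch of that convention, not of the driver's exact apsd_info layout (the helper name and the assumption that the two-bit code is already isolated are the editor's):

/*
 * Illustrative only: the WMM "Max SP Length" convention referenced above.
 * A code of 0 means "deliver all buffered frames", matching the
 * ATH6KL_APSD_ALL_FRAME fallback; codes 1/2/3 mean 2/4/6 frames per
 * service period.
 */
static inline u32 wmm_max_sp_to_frames(u8 max_sp_code)
{
	switch (max_sp_code & 0x3) {
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 6;
	default:
		return 0;	/* no limit: send all buffered frames */
	}
}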
+
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
struct ath6kl *ar = target->dev->ar;
@@ -1078,10 +1284,12 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
int status = packet->status;
enum htc_endpoint_id ept = packet->endpoint;
bool is_amsdu, prev_ps, ps_state = false;
+ bool trig_state = false;
struct ath6kl_sta *conn = NULL;
struct sk_buff *skb1 = NULL;
struct ethhdr *datap = NULL;
struct ath6kl_vif *vif;
+ struct aggr_info_conn *aggr_conn;
u16 seq_no, offset;
u8 tid, if_idx;
@@ -1098,7 +1306,15 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
skb_pull(skb, HTC_HDR_LENGTH);
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
+ skb->data, skb->len);
+
if (ept == ar->ctrl_ep) {
+ if (test_bit(WMI_ENABLED, &ar->flag)) {
+ ath6kl_check_wow_status(ar);
+ ath6kl_wmi_control_rx(ar->wmi, skb);
+ return;
+ }
if_idx =
wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
} else {
@@ -1123,10 +1339,6 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
spin_unlock_bh(&vif->if_lock);
-
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
- skb->data, skb->len);
-
skb->dev = vif->ndev;
if (!test_bit(WMI_ENABLED, &ar->flag)) {
@@ -1138,11 +1350,6 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
ath6kl_check_wow_status(ar);
- if (ept == ar->ctrl_ep) {
- ath6kl_wmi_control_rx(ar->wmi, skb);
- return;
- }
-
min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
sizeof(struct ath6kl_llc_snap_hdr);
@@ -1171,6 +1378,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
WMI_DATA_HDR_PS_MASK);
offset = sizeof(struct wmi_data_hdr);
+ trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);
switch (meta_type) {
case 0:
@@ -1209,18 +1417,61 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
else
conn->sta_flags &= ~STA_PS_SLEEP;
+ /* Accept trigger only when the station is in sleep */
+ if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
+ ath6kl_uapsd_trigger_frame_rx(vif, conn);
+
if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
if (!(conn->sta_flags & STA_PS_SLEEP)) {
struct sk_buff *skbuff = NULL;
+ bool is_apsdq_empty;
+ struct ath6kl_mgmt_buff *mgmt;
+ u8 idx;
spin_lock_bh(&conn->psq_lock);
- while ((skbuff = skb_dequeue(&conn->psq))
- != NULL) {
+ while (conn->mgmt_psq_len > 0) {
+ mgmt = list_first_entry(
+ &conn->mgmt_psq,
+ struct ath6kl_mgmt_buff,
+ list);
+ list_del(&mgmt->list);
+ conn->mgmt_psq_len--;
+ spin_unlock_bh(&conn->psq_lock);
+ idx = vif->fw_vif_idx;
+
+ ath6kl_wmi_send_mgmt_cmd(ar->wmi,
+ idx,
+ mgmt->id,
+ mgmt->freq,
+ mgmt->wait,
+ mgmt->buf,
+ mgmt->len,
+ mgmt->no_cck);
+
+ kfree(mgmt);
+ spin_lock_bh(&conn->psq_lock);
+ }
+ conn->mgmt_psq_len = 0;
+ while ((skbuff = skb_dequeue(&conn->psq))) {
+ spin_unlock_bh(&conn->psq_lock);
+ ath6kl_data_tx(skbuff, vif->ndev);
+ spin_lock_bh(&conn->psq_lock);
+ }
+
+ is_apsdq_empty = skb_queue_empty(&conn->apsdq);
+ while ((skbuff = skb_dequeue(&conn->apsdq))) {
spin_unlock_bh(&conn->psq_lock);
ath6kl_data_tx(skbuff, vif->ndev);
spin_lock_bh(&conn->psq_lock);
}
spin_unlock_bh(&conn->psq_lock);
+
+ if (!is_apsdq_empty)
+ ath6kl_wmi_set_apsd_bfrd_traf(
+ ar->wmi,
+ vif->fw_vif_idx,
+ conn->aid, 0, 0);
+
/* Clear the PVB for this STA */
ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
conn->aid, 0);
@@ -1314,11 +1565,21 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
datap = (struct ethhdr *) skb->data;
- if (is_unicast_ether_addr(datap->h_dest) &&
- aggr_process_recv_frm(vif->aggr_cntxt, tid, seq_no,
- is_amsdu, skb))
- /* aggregation code will handle the skb */
- return;
+ if (is_unicast_ether_addr(datap->h_dest)) {
+ if (vif->nw_type == AP_NETWORK) {
+ conn = ath6kl_find_sta(vif, datap->h_source);
+ if (!conn)
+ return;
+ aggr_conn = conn->aggr_conn;
+ } else
+ aggr_conn = vif->aggr_cntxt->aggr_conn;
+
+ if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
+ is_amsdu, skb)) {
+ /* aggregation code will handle the skb */
+ return;
+ }
+ }
ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
@@ -1326,13 +1587,13 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
static void aggr_timeout(unsigned long arg)
{
u8 i, j;
- struct aggr_info *p_aggr = (struct aggr_info *) arg;
+ struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
struct rxtid *rxtid;
struct rxtid_stats *stats;
for (i = 0; i < NUM_OF_TIDS; i++) {
- rxtid = &p_aggr->rx_tid[i];
- stats = &p_aggr->stat[i];
+ rxtid = &aggr_conn->rx_tid[i];
+ stats = &aggr_conn->stat[i];
if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
continue;
@@ -1343,18 +1604,18 @@ static void aggr_timeout(unsigned long arg)
rxtid->seq_next,
((rxtid->seq_next + rxtid->hold_q_sz-1) &
ATH6KL_MAX_SEQ_NO));
- aggr_deque_frms(p_aggr, i, 0, 0);
+ aggr_deque_frms(aggr_conn, i, 0, 0);
}
- p_aggr->timer_scheduled = false;
+ aggr_conn->timer_scheduled = false;
for (i = 0; i < NUM_OF_TIDS; i++) {
- rxtid = &p_aggr->rx_tid[i];
+ rxtid = &aggr_conn->rx_tid[i];
if (rxtid->aggr && rxtid->hold_q) {
for (j = 0; j < rxtid->hold_q_sz; j++) {
if (rxtid->hold_q[j].skb) {
- p_aggr->timer_scheduled = true;
+ aggr_conn->timer_scheduled = true;
rxtid->timer_mon = true;
rxtid->progress = false;
break;
@@ -1366,24 +1627,24 @@ static void aggr_timeout(unsigned long arg)
}
}
- if (p_aggr->timer_scheduled)
- mod_timer(&p_aggr->timer,
+ if (aggr_conn->timer_scheduled)
+ mod_timer(&aggr_conn->timer,
jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}
-static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
+static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
{
struct rxtid *rxtid;
struct rxtid_stats *stats;
- if (!p_aggr || tid >= NUM_OF_TIDS)
+ if (!aggr_conn || tid >= NUM_OF_TIDS)
return;
- rxtid = &p_aggr->rx_tid[tid];
- stats = &p_aggr->stat[tid];
+ rxtid = &aggr_conn->rx_tid[tid];
+ stats = &aggr_conn->stat[tid];
if (rxtid->aggr)
- aggr_deque_frms(p_aggr, tid, 0, 0);
+ aggr_deque_frms(aggr_conn, tid, 0, 0);
rxtid->aggr = false;
rxtid->progress = false;
@@ -1398,26 +1659,40 @@ static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
memset(stats, 0, sizeof(struct rxtid_stats));
}
-void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
+void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
u8 win_sz)
{
- struct aggr_info *p_aggr = vif->aggr_cntxt;
+ struct ath6kl_sta *sta;
+ struct aggr_info_conn *aggr_conn = NULL;
struct rxtid *rxtid;
struct rxtid_stats *stats;
u16 hold_q_size;
+ u8 tid, aid;
- if (!p_aggr)
+ if (vif->nw_type == AP_NETWORK) {
+ aid = ath6kl_get_aid(tid_mux);
+ sta = ath6kl_find_sta_by_aid(vif->ar, aid);
+ if (sta)
+ aggr_conn = sta->aggr_conn;
+ } else
+ aggr_conn = vif->aggr_cntxt->aggr_conn;
+
+ if (!aggr_conn)
return;
- rxtid = &p_aggr->rx_tid[tid];
- stats = &p_aggr->stat[tid];
+ tid = ath6kl_get_tid(tid_mux);
+ if (tid >= NUM_OF_TIDS)
+ return;
+
+ rxtid = &aggr_conn->rx_tid[tid];
+ stats = &aggr_conn->stat[tid];
if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
__func__, win_sz, tid);
if (rxtid->aggr)
- aggr_delete_tid_state(p_aggr, tid);
+ aggr_delete_tid_state(aggr_conn, tid);
rxtid->seq_next = seq_no;
hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
@@ -1433,31 +1708,23 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
rxtid->aggr = true;
}
-struct aggr_info *aggr_init(struct net_device *dev)
+void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
+ struct aggr_info_conn *aggr_conn)
{
- struct aggr_info *p_aggr = NULL;
struct rxtid *rxtid;
u8 i;
- p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
- if (!p_aggr) {
- ath6kl_err("failed to alloc memory for aggr_node\n");
- return NULL;
- }
-
- p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
- p_aggr->dev = dev;
- init_timer(&p_aggr->timer);
- p_aggr->timer.function = aggr_timeout;
- p_aggr->timer.data = (unsigned long) p_aggr;
-
- p_aggr->timer_scheduled = false;
- skb_queue_head_init(&p_aggr->free_q);
+ aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
+ aggr_conn->dev = vif->ndev;
+ init_timer(&aggr_conn->timer);
+ aggr_conn->timer.function = aggr_timeout;
+ aggr_conn->timer.data = (unsigned long) aggr_conn;
+ aggr_conn->aggr_info = aggr_info;
- ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
+ aggr_conn->timer_scheduled = false;
for (i = 0; i < NUM_OF_TIDS; i++) {
- rxtid = &p_aggr->rx_tid[i];
+ rxtid = &aggr_conn->rx_tid[i];
rxtid->aggr = false;
rxtid->progress = false;
rxtid->timer_mon = false;
@@ -1465,29 +1732,75 @@ struct aggr_info *aggr_init(struct net_device *dev)
spin_lock_init(&rxtid->lock);
}
+}
+
+struct aggr_info *aggr_init(struct ath6kl_vif *vif)
+{
+ struct aggr_info *p_aggr = NULL;
+
+ p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
+ if (!p_aggr) {
+ ath6kl_err("failed to alloc memory for aggr_node\n");
+ return NULL;
+ }
+
+ p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
+ if (!p_aggr->aggr_conn) {
+ ath6kl_err("failed to alloc memory for connection specific aggr info\n");
+ kfree(p_aggr);
+ return NULL;
+ }
+
+ aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);
+
+ skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
+ ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);
+
return p_aggr;
}
-void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid)
+void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
{
- struct aggr_info *p_aggr = vif->aggr_cntxt;
+ struct ath6kl_sta *sta;
struct rxtid *rxtid;
+ struct aggr_info_conn *aggr_conn = NULL;
+ u8 tid, aid;
+
+ if (vif->nw_type == AP_NETWORK) {
+ aid = ath6kl_get_aid(tid_mux);
+ sta = ath6kl_find_sta_by_aid(vif->ar, aid);
+ if (sta)
+ aggr_conn = sta->aggr_conn;
+ } else
+ aggr_conn = vif->aggr_cntxt->aggr_conn;
- if (!p_aggr)
+ if (!aggr_conn)
return;
- rxtid = &p_aggr->rx_tid[tid];
+ tid = ath6kl_get_tid(tid_mux);
+ if (tid >= NUM_OF_TIDS)
+ return;
+
+ rxtid = &aggr_conn->rx_tid[tid];
if (rxtid->aggr)
- aggr_delete_tid_state(p_aggr, tid);
+ aggr_delete_tid_state(aggr_conn, tid);
}
-void aggr_reset_state(struct aggr_info *aggr_info)
+void aggr_reset_state(struct aggr_info_conn *aggr_conn)
{
u8 tid;
+ if (!aggr_conn)
+ return;
+
+ if (aggr_conn->timer_scheduled) {
+ del_timer(&aggr_conn->timer);
+ aggr_conn->timer_scheduled = false;
+ }
+
for (tid = 0; tid < NUM_OF_TIDS; tid++)
- aggr_delete_tid_state(aggr_info, tid);
+ aggr_delete_tid_state(aggr_conn, tid);
}
/* clean up our amsdu buffer list */
@@ -1514,28 +1827,11 @@ void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
void aggr_module_destroy(struct aggr_info *aggr_info)
{
- struct rxtid *rxtid;
- u8 i, k;
-
if (!aggr_info)
return;
- if (aggr_info->timer_scheduled) {
- del_timer(&aggr_info->timer);
- aggr_info->timer_scheduled = false;
- }
-
- for (i = 0; i < NUM_OF_TIDS; i++) {
- rxtid = &aggr_info->rx_tid[i];
- if (rxtid->hold_q) {
- for (k = 0; k < rxtid->hold_q_sz; k++)
- dev_kfree_skb(rxtid->hold_q[k].skb);
- kfree(rxtid->hold_q);
- }
-
- skb_queue_purge(&rxtid->q);
- }
-
- skb_queue_purge(&aggr_info->free_q);
+ aggr_reset_state(aggr_info->aggr_conn);
+ skb_queue_purge(&aggr_info->rx_amsdu_freeq);
+ kfree(aggr_info->aggr_conn);
kfree(aggr_info);
}
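To recap the split made above: struct aggr_info now owns only the shared rx_amsdu_freeq, while each struct aggr_info_conn carries the per-connection reorder state (per-TID hold queues, stats and the flush timer). In AP mode every associated station gets its own aggr_conn; in station mode the vif-wide aggr_cntxt->aggr_conn is used. A sketch of the lookup the ADDBA/DELBA handlers above perform, factored into a hypothetical helper:

/* Hypothetical helper mirroring the open-coded lookup in
 * aggr_recv_addba_req_evt()/aggr_recv_delba_req_evt(): per-station
 * context in AP mode, vif-wide context otherwise. */
static struct aggr_info_conn *aggr_find_conn(struct ath6kl_vif *vif, u8 tid_mux)
{
	struct ath6kl_sta *sta;

	if (vif->nw_type == AP_NETWORK) {
		sta = ath6kl_find_sta_by_aid(vif->ar, ath6kl_get_aid(tid_mux));
		return sta ? sta->aggr_conn : NULL;
	}

	return vif->aggr_cntxt->aggr_conn;
}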
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
new file mode 100644
index 000000000000..325b1224c2b1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -0,0 +1,432 @@
+/*
+ * Copyright (c) 2007-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "debug.h"
+#include "core.h"
+
+/* usb device object */
+struct ath6kl_usb {
+ struct usb_device *udev;
+ struct usb_interface *interface;
+ u8 *diag_cmd_buffer;
+ u8 *diag_resp_buffer;
+ struct ath6kl *ar;
+};
+
+/* diagnostic command definitions */
+#define ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD 1
+#define ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP 2
+#define ATH6KL_USB_CONTROL_REQ_DIAG_CMD 3
+#define ATH6KL_USB_CONTROL_REQ_DIAG_RESP 4
+
+#define ATH6KL_USB_CTRL_DIAG_CC_READ 0
+#define ATH6KL_USB_CTRL_DIAG_CC_WRITE 1
+
+struct ath6kl_usb_ctrl_diag_cmd_write {
+ __le32 cmd;
+ __le32 address;
+ __le32 value;
+ __le32 _pad[1];
+} __packed;
+
+struct ath6kl_usb_ctrl_diag_cmd_read {
+ __le32 cmd;
+ __le32 address;
+} __packed;
+
+struct ath6kl_usb_ctrl_diag_resp_read {
+ __le32 value;
+} __packed;
+
+#define ATH6KL_USB_MAX_DIAG_CMD (sizeof(struct ath6kl_usb_ctrl_diag_cmd_write))
+#define ATH6KL_USB_MAX_DIAG_RESP (sizeof(struct ath6kl_usb_ctrl_diag_resp_read))
+
+static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb)
+{
+ usb_set_intfdata(ar_usb->interface, NULL);
+
+ kfree(ar_usb->diag_cmd_buffer);
+ kfree(ar_usb->diag_resp_buffer);
+
+ kfree(ar_usb);
+}
+
+static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface)
+{
+ struct ath6kl_usb *ar_usb = NULL;
+ struct usb_device *dev = interface_to_usbdev(interface);
+ int status = 0;
+
+ ar_usb = kzalloc(sizeof(struct ath6kl_usb), GFP_KERNEL);
+ if (ar_usb == NULL)
+ goto fail_ath6kl_usb_create;
+
+ memset(ar_usb, 0, sizeof(struct ath6kl_usb));
+ usb_set_intfdata(interface, ar_usb);
+ ar_usb->udev = dev;
+ ar_usb->interface = interface;
+
+ ar_usb->diag_cmd_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_CMD, GFP_KERNEL);
+ if (ar_usb->diag_cmd_buffer == NULL) {
+ status = -ENOMEM;
+ goto fail_ath6kl_usb_create;
+ }
+
+ ar_usb->diag_resp_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_RESP,
+ GFP_KERNEL);
+ if (ar_usb->diag_resp_buffer == NULL) {
+ status = -ENOMEM;
+ goto fail_ath6kl_usb_create;
+ }
+
+fail_ath6kl_usb_create:
+ if (status != 0) {
+ ath6kl_usb_destroy(ar_usb);
+ ar_usb = NULL;
+ }
+ return ar_usb;
+}
+
+static void ath6kl_usb_device_detached(struct usb_interface *interface)
+{
+ struct ath6kl_usb *ar_usb;
+
+ ar_usb = usb_get_intfdata(interface);
+ if (ar_usb == NULL)
+ return;
+
+ ath6kl_stop_txrx(ar_usb->ar);
+
+ ath6kl_core_cleanup(ar_usb->ar);
+
+ ath6kl_usb_destroy(ar_usb);
+}
+
+static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
+ u8 req, u16 value, u16 index, void *data,
+ u32 size)
+{
+ u8 *buf = NULL;
+ int ret;
+
+ if (size > 0) {
+ buf = kmalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ memcpy(buf, data, size);
+ }
+
+ /* note: if successful, returns the number of bytes transferred */
+ ret = usb_control_msg(ar_usb->udev,
+ usb_sndctrlpipe(ar_usb->udev, 0),
+ req,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, buf,
+ size, 1000);
+
+ if (ret < 0) {
+ ath6kl_dbg(ATH6KL_DBG_USB, "%s failed, result = %d\n",
+ __func__, ret);
+ }
+
+ kfree(buf);
+
+ return 0;
+}
+
+static int ath6kl_usb_submit_ctrl_in(struct ath6kl_usb *ar_usb,
+ u8 req, u16 value, u16 index, void *data,
+ u32 size)
+{
+ u8 *buf = NULL;
+ int ret;
+
+ if (size > 0) {
+ buf = kmalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ }
+
+ /* note: if successful, returns the number of bytes transferred */
+ ret = usb_control_msg(ar_usb->udev,
+ usb_rcvctrlpipe(ar_usb->udev, 0),
+ req,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, buf,
+ size, 2 * HZ);
+
+ if (ret < 0) {
+ ath6kl_dbg(ATH6KL_DBG_USB, "%s failed, result = %d\n",
+ __func__, ret);
+ }
+
+ memcpy((u8 *) data, buf, size);
+
+ kfree(buf);
+
+ return 0;
+}
+
+static int ath6kl_usb_ctrl_msg_exchange(struct ath6kl_usb *ar_usb,
+ u8 req_val, u8 *req_buf, u32 req_len,
+ u8 resp_val, u8 *resp_buf, u32 *resp_len)
+{
+ int ret;
+
+ /* send command */
+ ret = ath6kl_usb_submit_ctrl_out(ar_usb, req_val, 0, 0,
+ req_buf, req_len);
+
+ if (ret != 0)
+ return ret;
+
+ if (resp_buf == NULL) {
+ /* no expected response */
+ return ret;
+ }
+
+ /* get response */
+ ret = ath6kl_usb_submit_ctrl_in(ar_usb, resp_val, 0, 0,
+ resp_buf, *resp_len);
+
+ return ret;
+}
+
+static int ath6kl_usb_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
+{
+ struct ath6kl_usb *ar_usb = ar->hif_priv;
+ struct ath6kl_usb_ctrl_diag_resp_read *resp;
+ struct ath6kl_usb_ctrl_diag_cmd_read *cmd;
+ u32 resp_len;
+ int ret;
+
+ cmd = (struct ath6kl_usb_ctrl_diag_cmd_read *) ar_usb->diag_cmd_buffer;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->cmd = ATH6KL_USB_CTRL_DIAG_CC_READ;
+ cmd->address = cpu_to_le32(address);
+ resp_len = sizeof(*resp);
+
+ ret = ath6kl_usb_ctrl_msg_exchange(ar_usb,
+ ATH6KL_USB_CONTROL_REQ_DIAG_CMD,
+ (u8 *) cmd,
+ sizeof(struct ath6kl_usb_ctrl_diag_cmd_write),
+ ATH6KL_USB_CONTROL_REQ_DIAG_RESP,
+ ar_usb->diag_resp_buffer, &resp_len);
+
+ if (ret)
+ return ret;
+
+ resp = (struct ath6kl_usb_ctrl_diag_resp_read *)
+ ar_usb->diag_resp_buffer;
+
+ *data = le32_to_cpu(resp->value);
+
+ return ret;
+}
+
+static int ath6kl_usb_diag_write32(struct ath6kl *ar, u32 address, __le32 data)
+{
+ struct ath6kl_usb *ar_usb = ar->hif_priv;
+ struct ath6kl_usb_ctrl_diag_cmd_write *cmd;
+
+ cmd = (struct ath6kl_usb_ctrl_diag_cmd_write *) ar_usb->diag_cmd_buffer;
+
+ memset(cmd, 0, sizeof(struct ath6kl_usb_ctrl_diag_cmd_write));
+ cmd->cmd = cpu_to_le32(ATH6KL_USB_CTRL_DIAG_CC_WRITE);
+ cmd->address = cpu_to_le32(address);
+ cmd->value = data;
+
+ return ath6kl_usb_ctrl_msg_exchange(ar_usb,
+ ATH6KL_USB_CONTROL_REQ_DIAG_CMD,
+ (u8 *) cmd,
+ sizeof(*cmd),
+ 0, NULL, NULL);
+
+}
+
+static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ struct ath6kl_usb *ar_usb = ar->hif_priv;
+ int ret;
+
+ /* get response */
+ ret = ath6kl_usb_submit_ctrl_in(ar_usb,
+ ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP,
+ 0, 0, buf, len);
+ if (ret != 0) {
+ ath6kl_err("Unable to read the bmi data from the device: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath6kl_usb_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ struct ath6kl_usb *ar_usb = ar->hif_priv;
+ int ret;
+
+ /* send command */
+ ret = ath6kl_usb_submit_ctrl_out(ar_usb,
+ ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD,
+ 0, 0, buf, len);
+ if (ret != 0) {
+ ath6kl_err("unable to send the bmi data to the device: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath6kl_usb_power_on(struct ath6kl *ar)
+{
+ return 0;
+}
+
+static int ath6kl_usb_power_off(struct ath6kl *ar)
+{
+ return 0;
+}
+
+static const struct ath6kl_hif_ops ath6kl_usb_ops = {
+ .diag_read32 = ath6kl_usb_diag_read32,
+ .diag_write32 = ath6kl_usb_diag_write32,
+ .bmi_read = ath6kl_usb_bmi_read,
+ .bmi_write = ath6kl_usb_bmi_write,
+ .power_on = ath6kl_usb_power_on,
+ .power_off = ath6kl_usb_power_off,
+};
+
+/* ath6kl usb driver registered functions */
+static int ath6kl_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(interface);
+ struct ath6kl *ar;
+ struct ath6kl_usb *ar_usb = NULL;
+ int vendor_id, product_id;
+ int ret = 0;
+
+ usb_get_dev(dev);
+
+ vendor_id = le16_to_cpu(dev->descriptor.idVendor);
+ product_id = le16_to_cpu(dev->descriptor.idProduct);
+
+ ath6kl_dbg(ATH6KL_DBG_USB, "vendor_id = %04x\n", vendor_id);
+ ath6kl_dbg(ATH6KL_DBG_USB, "product_id = %04x\n", product_id);
+
+ if (interface->cur_altsetting)
+ ath6kl_dbg(ATH6KL_DBG_USB, "USB Interface %d\n",
+ interface->cur_altsetting->desc.bInterfaceNumber);
+
+
+ ath6kl_dbg(ATH6KL_DBG_USB, "USB 2.0 Host\n");
+ else
+ ath6kl_dbg(ATH6KL_DBG_USB, "USB 1.1 Host\n");
+
+ ar_usb = ath6kl_usb_create(interface);
+
+ if (ar_usb == NULL) {
+ ret = -ENOMEM;
+ goto err_usb_put;
+ }
+
+ ar = ath6kl_core_create(&ar_usb->udev->dev);
+ if (ar == NULL) {
+ ath6kl_err("Failed to alloc ath6kl core\n");
+ ret = -ENOMEM;
+ goto err_usb_destroy;
+ }
+
+ ar->hif_priv = ar_usb;
+ ar->hif_type = ATH6KL_HIF_TYPE_USB;
+ ar->hif_ops = &ath6kl_usb_ops;
+ ar->mbox_info.block_size = 16;
+ ar->bmi.max_data_size = 252;
+
+ ar_usb->ar = ar;
+
+ ret = ath6kl_core_init(ar);
+ if (ret) {
+ ath6kl_err("Failed to init ath6kl core: %d\n", ret);
+ goto err_core_free;
+ }
+
+ return ret;
+
+err_core_free:
+ ath6kl_core_destroy(ar);
+err_usb_destroy:
+ ath6kl_usb_destroy(ar_usb);
+err_usb_put:
+ usb_put_dev(dev);
+
+ return ret;
+}
+
+static void ath6kl_usb_remove(struct usb_interface *interface)
+{
+ usb_put_dev(interface_to_usbdev(interface));
+ ath6kl_usb_device_detached(interface);
+}
+
+/* table of devices that work with this driver */
+static struct usb_device_id ath6kl_usb_ids[] = {
+ {USB_DEVICE(0x0cf3, 0x9374)},
+ { /* Terminating entry */ },
+};
+
+MODULE_DEVICE_TABLE(usb, ath6kl_usb_ids);
+
+static struct usb_driver ath6kl_usb_driver = {
+ .name = "ath6kl_usb",
+ .probe = ath6kl_usb_probe,
+ .disconnect = ath6kl_usb_remove,
+ .id_table = ath6kl_usb_ids,
+};
+
+static int ath6kl_usb_init(void)
+{
+ usb_register(&ath6kl_usb_driver);
+ return 0;
+}
+
+static void ath6kl_usb_exit(void)
+{
+ usb_deregister(&ath6kl_usb_driver);
+}
+
+module_init(ath6kl_usb_init);
+module_exit(ath6kl_usb_exit);
+
+MODULE_AUTHOR("Atheros Communications, Inc.");
+MODULE_DESCRIPTION("Driver support for Atheros AR600x USB devices");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(AR6004_HW_1_0_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_1_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index f6f2aa27fc20..2b442332cd0f 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -126,7 +127,7 @@ int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb)
if (!is_ethertype(be16_to_cpu(type))) {
ath6kl_dbg(ATH6KL_DBG_WMI,
- "%s: pkt is already in 802.3 format\n", __func__);
+ "%s: pkt is already in 802.3 format\n", __func__);
return 0;
}
@@ -180,7 +181,7 @@ static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb,
}
int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
- u8 msg_type, bool more_data,
+ u8 msg_type, u32 flags,
enum wmi_data_hdr_data_type data_type,
u8 meta_ver, void *tx_meta_info, u8 if_idx)
{
@@ -204,17 +205,19 @@ int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
data_hdr->info = msg_type << WMI_DATA_HDR_MSG_TYPE_SHIFT;
data_hdr->info |= data_type << WMI_DATA_HDR_DATA_TYPE_SHIFT;
- if (more_data)
- data_hdr->info |=
- WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT;
+ if (flags & WMI_DATA_HDR_FLAGS_MORE)
+ data_hdr->info |= WMI_DATA_HDR_MORE;
- data_hdr->info2 = cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);
- data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
+ if (flags & WMI_DATA_HDR_FLAGS_EOSP)
+ data_hdr->info3 |= cpu_to_le16(WMI_DATA_HDR_EOSP);
+
+ data_hdr->info2 |= cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);
+ data_hdr->info3 |= cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
return 0;
}
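With this change, callers of ath6kl_wmi_data_hdr_add() pass a bitmap of enum wmi_data_hdr_flags instead of a bare more_data boolean, so MORE and EOSP can be set independently. A hedged usage sketch; every argument other than the flags is a placeholder taken from the calling context:

/* Sketch only: tag a frame as the last of a U-APSD service period while
 * more data remains buffered. skb, meta_ver, meta_info and vif are
 * placeholders. */
u32 flags = WMI_DATA_HDR_FLAGS_MORE | WMI_DATA_HDR_FLAGS_EOSP;

ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE, flags,
			      WMI_DATA_HDR_DATA_TYPE_802_3,
			      meta_ver, meta_info, vif->fw_vif_idx);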
-static u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
+u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
{
struct iphdr *ip_hdr = (struct iphdr *) pkt;
u8 ip_pri;
@@ -236,6 +239,11 @@ static u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
return ip_pri;
}
+u8 ath6kl_wmi_get_traffic_class(u8 user_priority)
+{
+ return up_to_ac[user_priority & 0x7];
+}
+
int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
struct sk_buff *skb,
u32 layer2_priority, bool wmm_enabled,
@@ -419,9 +427,6 @@ static int ath6kl_wmi_tx_complete_event_rx(u8 *datap, int len)
ath6kl_dbg(ATH6KL_DBG_WMI, "comp: %d %d %d\n",
evt->num_msg, evt->msg_len, evt->msg_type);
- if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_WMI))
- return 0;
-
for (index = 0; index < evt->num_msg; index++) {
size = sizeof(struct wmi_tx_complete_event) +
(index * sizeof(struct tx_complete_msg_v1));
@@ -552,7 +557,8 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
dlen, freq, vif->probe_req_report);
if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
- cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC);
+ cfg80211_rx_mgmt(vif->ndev, freq, 0,
+ ev->data, dlen, GFP_ATOMIC);
return 0;
}
@@ -591,7 +597,8 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
return -EINVAL;
}
ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
- cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC);
+ cfg80211_rx_mgmt(vif->ndev, freq, 0,
+ ev->data, dlen, GFP_ATOMIC);
return 0;
}
@@ -786,12 +793,14 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len,
ev->u.ap_sta.keymgmt,
le16_to_cpu(ev->u.ap_sta.cipher),
ev->u.ap_sta.apsd_info);
+
ath6kl_connect_ap_mode_sta(
vif, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
ev->u.ap_sta.keymgmt,
le16_to_cpu(ev->u.ap_sta.cipher),
ev->u.ap_sta.auth, ev->assoc_req_len,
- ev->assoc_info + ev->beacon_ie_len);
+ ev->assoc_info + ev->beacon_ie_len,
+ ev->u.ap_sta.apsd_info);
}
return 0;
}
@@ -819,8 +828,8 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len,
if (pie[1] > 3 && pie[2] == 0x00 && pie[3] == 0x50 &&
pie[4] == 0xf2 && pie[5] == WMM_OUI_TYPE) {
/* WMM OUI (00:50:F2) */
- if (pie[1] > 5
- && pie[6] == WMM_PARAM_OUI_SUBTYPE)
+ if (pie[1] > 5 &&
+ pie[6] == WMM_PARAM_OUI_SUBTYPE)
wmi->is_wmm_enabled = true;
}
break;
@@ -904,17 +913,17 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
regpair = ath6kl_get_regpair((u16) reg_code);
country = ath6kl_regd_find_country_by_rd((u16) reg_code);
ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n",
- regpair->regDmnEnum);
+ regpair->regDmnEnum);
}
- if (country) {
+ if (country && wmi->parent_dev->wiphy_registered) {
alpha2[0] = country->isoName[0];
alpha2[1] = country->isoName[1];
regulatory_hint(wmi->parent_dev->wiphy, alpha2);
ath6kl_dbg(ATH6KL_DBG_WMI, "Country alpha2 being used: %c%c\n",
- alpha2[0], alpha2[1]);
+ alpha2[0], alpha2[1]);
}
}
@@ -1025,8 +1034,9 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
if (len < 8 + 2 + 2)
return -EINVAL;
- if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &vif->flags)
- && memcmp(bih->bssid, vif->bssid, ETH_ALEN) == 0) {
+ if (bih->frame_type == BEACON_FTYPE &&
+ test_bit(CONNECTED, &vif->flags) &&
+ memcmp(bih->bssid, vif->bssid, ETH_ALEN) == 0) {
const u8 *tim;
tim = cfg80211_find_ie(WLAN_EID_TIM, buf + 8 + 2 + 2,
len - 8 - 2 - 2);
@@ -1145,9 +1155,9 @@ static int ath6kl_wmi_bitrate_reply_rx(struct wmi *wmi, u8 *datap, int len)
return 0;
}
-static int ath6kl_wmi_tcmd_test_report_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_test_rx(struct wmi *wmi, u8 *datap, int len)
{
- ath6kl_tm_rx_report_event(wmi->parent_dev, datap, len);
+ ath6kl_tm_rx_event(wmi->parent_dev, datap, len);
return 0;
}
@@ -1358,8 +1368,8 @@ static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap,
/* Upper threshold breached */
if (rssi < sq_thresh->upper_threshold[0]) {
ath6kl_dbg(ATH6KL_DBG_WMI,
- "spurious upper rssi threshold event: %d\n",
- rssi);
+ "spurious upper rssi threshold event: %d\n",
+ rssi);
} else if ((rssi < sq_thresh->upper_threshold[1]) &&
(rssi >= sq_thresh->upper_threshold[0])) {
new_threshold = WMI_RSSI_THRESHOLD1_ABOVE;
@@ -1382,7 +1392,7 @@ static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap,
/* Lower threshold breached */
if (rssi > sq_thresh->lower_threshold[0]) {
ath6kl_dbg(ATH6KL_DBG_WMI,
- "spurious lower rssi threshold event: %d %d\n",
+ "spurious lower rssi threshold event: %d %d\n",
rssi, sq_thresh->lower_threshold[0]);
} else if ((rssi > sq_thresh->lower_threshold[1]) &&
(rssi <= sq_thresh->lower_threshold[0])) {
@@ -1543,8 +1553,8 @@ static int ath6kl_wmi_snr_threshold_event_rx(struct wmi *wmi, u8 *datap,
/* Upper threshold breached */
if (snr < sq_thresh->upper_threshold[0]) {
ath6kl_dbg(ATH6KL_DBG_WMI,
- "spurious upper snr threshold event: %d\n",
- snr);
+ "spurious upper snr threshold event: %d\n",
+ snr);
} else if ((snr < sq_thresh->upper_threshold[1]) &&
(snr >= sq_thresh->upper_threshold[0])) {
new_threshold = WMI_SNR_THRESHOLD1_ABOVE;
@@ -1561,8 +1571,8 @@ static int ath6kl_wmi_snr_threshold_event_rx(struct wmi *wmi, u8 *datap,
/* Lower threshold breached */
if (snr > sq_thresh->lower_threshold[0]) {
ath6kl_dbg(ATH6KL_DBG_WMI,
- "spurious lower snr threshold event: %d\n",
- sq_thresh->lower_threshold[0]);
+ "spurious lower snr threshold event: %d\n",
+ sq_thresh->lower_threshold[0]);
} else if ((snr > sq_thresh->lower_threshold[1]) &&
(snr <= sq_thresh->lower_threshold[0])) {
new_threshold = WMI_SNR_THRESHOLD4_BELOW;
@@ -2020,6 +2030,26 @@ int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
return ret;
}
+int ath6kl_wmi_bmisstime_cmd(struct wmi *wmi, u8 if_idx,
+ u16 bmiss_time, u16 num_beacons)
+{
+ struct sk_buff *skb;
+ struct wmi_bmiss_time_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bmiss_time_cmd *) skb->data;
+ cmd->bmiss_time = cpu_to_le16(bmiss_time);
+ cmd->num_beacons = cpu_to_le16(num_beacons);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_BMISS_TIME_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode)
{
struct sk_buff *skb;
@@ -2479,15 +2509,16 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
return ret;
}
-int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd)
+int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, u8 if_idx,
+ __be32 ips0, __be32 ips1)
{
struct sk_buff *skb;
struct wmi_set_ip_cmd *cmd;
int ret;
/* Multicast addresses are not valid */
- if ((*((u8 *) &ip_cmd->ips[0]) >= 0xE0) ||
- (*((u8 *) &ip_cmd->ips[1]) >= 0xE0))
+ if (ipv4_is_multicast(ips0) ||
+ ipv4_is_multicast(ips1))
return -EINVAL;
skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_ip_cmd));
@@ -2495,9 +2526,10 @@ int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd)
return -ENOMEM;
cmd = (struct wmi_set_ip_cmd *) skb->data;
- memcpy(cmd, ip_cmd, sizeof(struct wmi_set_ip_cmd));
+ cmd->ips[0] = ips0;
+ cmd->ips[1] = ips1;
- ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_IP_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_IP_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
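The rewritten check uses ipv4_is_multicast(), which tests for the 224.0.0.0/4 range directly on the network-byte-order address; this is equivalent to the old "first octet >= 0xE0" comparison but self-describing. Roughly what that helper does:

/* Approximately the check performed by ipv4_is_multicast(): the address
 * lies in 224.0.0.0/4, i.e. its first octet is 0xE0-0xEF. */
static inline bool addr_is_ipv4_multicast(__be32 addr)
{
	return (addr & htonl(0xf0000000)) == htonl(0xe0000000);
}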
@@ -2582,6 +2614,18 @@ int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
return ret;
}
+/* This command has zero length payload */
+static int ath6kl_wmi_host_sleep_mode_cmd_prcd_evt_rx(struct wmi *wmi,
+ struct ath6kl_vif *vif)
+{
+ struct ath6kl *ar = wmi->parent_dev;
+
+ set_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);
+ wake_up(&ar->event_wq);
+
+ return 0;
+}
+
int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
enum ath6kl_wow_mode wow_mode,
u32 filter, u16 host_req_delay)
@@ -2591,7 +2635,7 @@ int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
int ret;
if ((wow_mode != ATH6KL_WOW_MODE_ENABLE) &&
- wow_mode != ATH6KL_WOW_MODE_DISABLE) {
+ wow_mode != ATH6KL_WOW_MODE_DISABLE) {
ath6kl_err("invalid wow mode: %d\n", wow_mode);
return -EINVAL;
}
@@ -2612,7 +2656,8 @@ int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
u8 list_id, u8 filter_size,
- u8 filter_offset, u8 *filter, u8 *mask)
+ u8 filter_offset, const u8 *filter,
+ const u8 *mask)
{
struct sk_buff *skb;
struct wmi_add_wow_pattern_cmd *cmd;
@@ -2853,6 +2898,51 @@ int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len)
return ret;
}
+int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on)
+{
+ struct sk_buff *skb;
+ struct wmi_mcast_filter_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mcast_filter_cmd *) skb->data;
+ cmd->mcast_all_enable = mc_all_on;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_MCAST_FILTER_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
+ u8 *filter, bool add_filter)
+{
+ struct sk_buff *skb;
+ struct wmi_mcast_filter_add_del_cmd *cmd;
+ int ret;
+
+ if ((filter[0] != 0x33 || filter[1] != 0x33) &&
+ (filter[0] != 0x01 || filter[1] != 0x00 ||
+ filter[2] != 0x5e || filter[3] > 0x7f)) {
+ ath6kl_warn("invalid multicast filter address\n");
+ return -EINVAL;
+ }
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mcast_filter_add_del_cmd *) skb->data;
+ memcpy(cmd->mcast_mac, filter, ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE);
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ add_filter ? WMI_SET_MCAST_FILTER_CMDID :
+ WMI_DEL_MCAST_FILTER_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
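The sanity check above only admits well-formed multicast MAC addresses: 33:33:xx:xx:xx:xx is the IPv6 multicast prefix, and 01:00:5e:00:00:00 through 01:00:5e:7f:ff:ff is the IPv4 multicast range (the low 23 bits of the group address are copied in, so the fourth octet never exceeds 0x7f). Two illustrative addresses that pass the check:

/* Illustrative filter addresses accepted by the check above:
 * 224.0.0.251 (IPv4 mDNS) maps to 01:00:5e:00:00:fb,
 * ff02::fb (IPv6 mDNS) maps to 33:33:00:00:00:fb. */
static const u8 mdns_v4_mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
static const u8 mdns_v6_mcast_mac[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0xfb };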
s32 ath6kl_wmi_get_rate(s8 rate_index)
{
@@ -2946,6 +3036,59 @@ int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac,
NO_SYNC_WMIFLAG);
}
+int ath6kl_wmi_ap_hidden_ssid(struct wmi *wmi, u8 if_idx, bool enable)
+{
+ struct sk_buff *skb;
+ struct wmi_ap_hidden_ssid_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_hidden_ssid_cmd *) skb->data;
+ cmd->hidden_ssid = enable ? 1 : 0;
+
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_HIDDEN_SSID_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+/* This command will be used to enable/disable AP uAPSD feature */
+int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable)
+{
+ struct wmi_ap_set_apsd_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_set_apsd_cmd *)skb->data;
+ cmd->enable = enable;
+
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_APSD_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_set_apsd_bfrd_traf(struct wmi *wmi, u8 if_idx,
+ u16 aid, u16 bitmap, u32 flags)
+{
+ struct wmi_ap_apsd_buffered_traffic_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_apsd_buffered_traffic_cmd *)skb->data;
+ cmd->aid = cpu_to_le16(aid);
+ cmd->bitmap = cpu_to_le16(bitmap);
+ cmd->flags = cpu_to_le32(flags);
+
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_AP_APSD_BUFFERED_TRAFFIC_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len,
struct ath6kl_vif *vif)
{
@@ -3078,8 +3221,9 @@ int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq, u32 dur)
* ath6kl_wmi_send_mgmt_cmd instead. The new function supports P2P
* mgmt operations using station interface.
*/
-int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
- u32 wait, const u8 *data, u16 data_len)
+static int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id,
+ u32 freq, u32 wait, const u8 *data,
+ u16 data_len)
{
struct sk_buff *skb;
struct wmi_send_action_cmd *p;
@@ -3115,9 +3259,9 @@ int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
- u32 wait, const u8 *data, u16 data_len,
- u32 no_cck)
+static int __ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id,
+ u32 freq, u32 wait, const u8 *data,
+ u16 data_len, u32 no_cck)
{
struct sk_buff *skb;
struct wmi_send_mgmt_cmd *p;
@@ -3154,6 +3298,32 @@ int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
NO_SYNC_WMIFLAG);
}
+int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+ u32 wait, const u8 *data, u16 data_len,
+ u32 no_cck)
+{
+ int status;
+ struct ath6kl *ar = wmi->parent_dev;
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+ ar->fw_capabilities)) {
+ /*
+ * If capable of doing P2P mgmt operations using the
+ * station interface, send additional information such
+ * as supported rates to advertise and xmit rates for
+ * probe requests.
+ */
+ status = __ath6kl_wmi_send_mgmt_cmd(ar->wmi, if_idx, id, freq,
+ wait, data, data_len,
+ no_cck);
+ } else {
+ status = ath6kl_wmi_send_action_cmd(ar->wmi, if_idx, id, freq,
+ wait, data, data_len);
+ }
+
+ return status;
+}
+
int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
const u8 *dst, const u8 *data,
u16 data_len)
@@ -3265,32 +3435,101 @@ static int ath6kl_wmi_roam_tbl_event_rx(struct wmi *wmi, u8 *datap, int len)
return ath6kl_debug_roam_tbl_event(wmi->parent_dev, datap, len);
}
-/* Control Path */
-int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
+/* Process interface-specific wmi events; the caller frees datap */
+static int ath6kl_wmi_proc_events_vif(struct wmi *wmi, u16 if_idx, u16 cmd_id,
+ u8 *datap, u32 len)
{
- struct wmi_cmd_hdr *cmd;
struct ath6kl_vif *vif;
- u32 len;
- u16 id;
- u8 if_idx;
- u8 *datap;
- int ret = 0;
- if (WARN_ON(skb == NULL))
+ vif = ath6kl_get_vif_by_index(wmi->parent_dev, if_idx);
+ if (!vif) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "Wmi event for unavailable vif, vif_index:%d\n",
+ if_idx);
return -EINVAL;
+ }
- if (skb->len < sizeof(struct wmi_cmd_hdr)) {
- ath6kl_err("bad packet 1\n");
- dev_kfree_skb(skb);
+ switch (cmd_id) {
+ case WMI_CONNECT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n");
+ return ath6kl_wmi_connect_event_rx(wmi, datap, len, vif);
+ case WMI_DISCONNECT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n");
+ return ath6kl_wmi_disconnect_event_rx(wmi, datap, len, vif);
+ case WMI_TKIP_MICERR_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n");
+ return ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len, vif);
+ case WMI_BSSINFO_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n");
+ return ath6kl_wmi_bssinfo_event_rx(wmi, datap, len, vif);
+ case WMI_NEIGHBOR_REPORT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n");
+ return ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len,
+ vif);
+ case WMI_SCAN_COMPLETE_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n");
+ return ath6kl_wmi_scan_complete_rx(wmi, datap, len, vif);
+ case WMI_REPORT_STATISTICS_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n");
+ return ath6kl_wmi_stats_event_rx(wmi, datap, len, vif);
+ case WMI_CAC_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n");
+ return ath6kl_wmi_cac_event_rx(wmi, datap, len, vif);
+ case WMI_PSPOLL_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n");
+ return ath6kl_wmi_pspoll_event_rx(wmi, datap, len, vif);
+ case WMI_DTIMEXPIRY_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n");
+ return ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len, vif);
+ case WMI_ADDBA_REQ_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n");
+ return ath6kl_wmi_addba_req_event_rx(wmi, datap, len, vif);
+ case WMI_DELBA_REQ_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n");
+ return ath6kl_wmi_delba_req_event_rx(wmi, datap, len, vif);
+ case WMI_SET_HOST_SLEEP_MODE_CMD_PROCESSED_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "WMI_SET_HOST_SLEEP_MODE_CMD_PROCESSED_EVENTID");
+ return ath6kl_wmi_host_sleep_mode_cmd_prcd_evt_rx(wmi, vif);
+ case WMI_REMAIN_ON_CHNL_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n");
+ return ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len, vif);
+ case WMI_CANCEL_REMAIN_ON_CHNL_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "WMI_CANCEL_REMAIN_ON_CHNL_EVENTID\n");
+ return ath6kl_wmi_cancel_remain_on_chnl_event_rx(wmi, datap,
+ len, vif);
+ case WMI_TX_STATUS_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_STATUS_EVENTID\n");
+ return ath6kl_wmi_tx_status_event_rx(wmi, datap, len, vif);
+ case WMI_RX_PROBE_REQ_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_PROBE_REQ_EVENTID\n");
+ return ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len, vif);
+ case WMI_RX_ACTION_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n");
+ return ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif);
+ default:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", cmd_id);
return -EINVAL;
}
+ return 0;
+}
+
+static int ath6kl_wmi_proc_events(struct wmi *wmi, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd;
+ int ret = 0;
+ u32 len;
+ u16 id;
+ u8 if_idx;
+ u8 *datap;
+
cmd = (struct wmi_cmd_hdr *) skb->data;
id = le16_to_cpu(cmd->cmd_id);
if_idx = le16_to_cpu(cmd->info1) & WMI_CMD_HDR_IF_ID_MASK;
skb_pull(skb, sizeof(struct wmi_cmd_hdr));
-
datap = skb->data;
len = skb->len;
@@ -3298,15 +3537,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi rx ",
datap, len);
- vif = ath6kl_get_vif_by_index(wmi->parent_dev, if_idx);
- if (!vif) {
- ath6kl_dbg(ATH6KL_DBG_WMI,
- "Wmi event for unavailable vif, vif_index:%d\n",
- if_idx);
- dev_kfree_skb(skb);
- return -EINVAL;
- }
-
switch (id) {
case WMI_GET_BITRATE_CMDID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n");
@@ -3324,26 +3554,10 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_READY_EVENTID\n");
ret = ath6kl_wmi_ready_event_rx(wmi, datap, len);
break;
- case WMI_CONNECT_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n");
- ret = ath6kl_wmi_connect_event_rx(wmi, datap, len, vif);
- break;
- case WMI_DISCONNECT_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n");
- ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len, vif);
- break;
case WMI_PEER_NODE_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n");
ret = ath6kl_wmi_peer_node_event_rx(wmi, datap, len);
break;
- case WMI_TKIP_MICERR_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n");
- ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len, vif);
- break;
- case WMI_BSSINFO_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n");
- ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len, vif);
- break;
case WMI_REGDOMAIN_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n");
ath6kl_wmi_regdomain_event(wmi, datap, len);
@@ -3352,23 +3566,10 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSTREAM_TIMEOUT_EVENTID\n");
ret = ath6kl_wmi_pstream_timeout_event_rx(wmi, datap, len);
break;
- case WMI_NEIGHBOR_REPORT_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n");
- ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len,
- vif);
- break;
- case WMI_SCAN_COMPLETE_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n");
- ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len, vif);
- break;
case WMI_CMDERROR_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n");
ret = ath6kl_wmi_error_event_rx(wmi, datap, len);
break;
- case WMI_REPORT_STATISTICS_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n");
- ret = ath6kl_wmi_stats_event_rx(wmi, datap, len, vif);
- break;
case WMI_RSSI_THRESHOLD_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n");
ret = ath6kl_wmi_rssi_threshold_event_rx(wmi, datap, len);
@@ -3388,10 +3589,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n");
ret = ath6kl_wmi_control_rx_xtnd(wmi, skb);
break;
- case WMI_CAC_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n");
- ret = ath6kl_wmi_cac_event_rx(wmi, datap, len, vif);
- break;
case WMI_CHANNEL_CHANGE_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n");
break;
@@ -3400,7 +3597,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_TEST_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TEST_EVENTID\n");
- ret = ath6kl_wmi_tcmd_test_report_rx(wmi, datap, len);
+ ret = ath6kl_wmi_test_rx(wmi, datap, len);
break;
case WMI_GET_FIXRATES_CMDID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_FIXRATES_CMDID\n");
@@ -3431,28 +3628,12 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n");
ret = ath6kl_wmi_get_pmkid_list_event_rx(wmi, datap, len);
break;
- case WMI_PSPOLL_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n");
- ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len, vif);
- break;
- case WMI_DTIMEXPIRY_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n");
- ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len, vif);
- break;
case WMI_SET_PARAMS_REPLY_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n");
break;
- case WMI_ADDBA_REQ_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n");
- ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len, vif);
- break;
case WMI_ADDBA_RESP_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n");
break;
- case WMI_DELBA_REQ_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n");
- ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len, vif);
- break;
case WMI_REPORT_BTCOEX_CONFIG_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI,
"WMI_REPORT_BTCOEX_CONFIG_EVENTID\n");
@@ -3465,47 +3646,39 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_COMPLETE_EVENTID\n");
ret = ath6kl_wmi_tx_complete_event_rx(datap, len);
break;
- case WMI_REMAIN_ON_CHNL_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n");
- ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len, vif);
- break;
- case WMI_CANCEL_REMAIN_ON_CHNL_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI,
- "WMI_CANCEL_REMAIN_ON_CHNL_EVENTID\n");
- ret = ath6kl_wmi_cancel_remain_on_chnl_event_rx(wmi, datap,
- len, vif);
- break;
- case WMI_TX_STATUS_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_STATUS_EVENTID\n");
- ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len, vif);
- break;
- case WMI_RX_PROBE_REQ_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_PROBE_REQ_EVENTID\n");
- ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len, vif);
- break;
case WMI_P2P_CAPABILITIES_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_CAPABILITIES_EVENTID\n");
ret = ath6kl_wmi_p2p_capabilities_event_rx(datap, len);
break;
- case WMI_RX_ACTION_EVENTID:
- ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n");
- ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif);
- break;
case WMI_P2P_INFO_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_INFO_EVENTID\n");
ret = ath6kl_wmi_p2p_info_event_rx(datap, len);
break;
default:
- ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", id);
- ret = -EINVAL;
+ /* the event may be interface specific */
+ ret = ath6kl_wmi_proc_events_vif(wmi, if_idx, id, datap, len);
break;
}
dev_kfree_skb(skb);
-
return ret;
}
+/* Control Path */
+int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
+{
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ if (skb->len < sizeof(struct wmi_cmd_hdr)) {
+ ath6kl_err("bad packet 1\n");
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ return ath6kl_wmi_proc_events(wmi, skb);
+}
+
void ath6kl_wmi_reset(struct wmi *wmi)
{
spin_lock_bh(&wmi->lock);
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 42ac311eda4e..4092e3e80790 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -110,6 +111,8 @@ struct wmi {
u8 fat_pipe_exist;
struct ath6kl *parent_dev;
u8 pwr_mode;
+
+ /* protects fat_pipe_exist and stream_exist_for_ac */
spinlock_t lock;
enum htc_endpoint_id ep_id;
struct sq_threshold_params
@@ -149,8 +152,7 @@ enum wmi_msg_type {
#define WMI_DATA_HDR_PS_MASK 0x1
#define WMI_DATA_HDR_PS_SHIFT 5
-#define WMI_DATA_HDR_MORE_MASK 0x1
-#define WMI_DATA_HDR_MORE_SHIFT 5
+#define WMI_DATA_HDR_MORE 0x20
enum wmi_data_hdr_data_type {
WMI_DATA_HDR_DATA_TYPE_802_3 = 0,
@@ -160,6 +162,13 @@ enum wmi_data_hdr_data_type {
WMI_DATA_HDR_DATA_TYPE_ACL,
};
+/* Bitmap of data header flags */
+enum wmi_data_hdr_flags {
+ WMI_DATA_HDR_FLAGS_MORE = 0x1,
+ WMI_DATA_HDR_FLAGS_EOSP = 0x2,
+ WMI_DATA_HDR_FLAGS_UAPSD = 0x4,
+};
+
#define WMI_DATA_HDR_DATA_TYPE_MASK 0x3
#define WMI_DATA_HDR_DATA_TYPE_SHIFT 6
@@ -173,8 +182,12 @@ enum wmi_data_hdr_data_type {
#define WMI_DATA_HDR_META_MASK 0x7
#define WMI_DATA_HDR_META_SHIFT 13
+/* Macros for operating on WMI_DATA_HDR (info3) field */
#define WMI_DATA_HDR_IF_IDX_MASK 0xF
+#define WMI_DATA_HDR_TRIG 0x10
+#define WMI_DATA_HDR_EOSP 0x10
+
struct wmi_data_hdr {
s8 rssi;
@@ -203,7 +216,8 @@ struct wmi_data_hdr {
/*
* usage of info3, 16-bit:
* b3:b0 - Interface index
- * b15:b4 - Reserved
+ * b4 - uAPSD trigger in rx & EOSP in tx
+ * b15:b5 - Reserved
*/
__le16 info3;
} __packed;
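Bit 4 of info3 is deliberately overloaded: on received frames it carries the U-APSD trigger indication (WMI_DATA_HDR_TRIG, read in ath6kl_rx()), while on transmitted frames the same bit signals end of service period (WMI_DATA_HDR_EOSP, set by ath6kl_wmi_data_hdr_add()). Hypothetical accessors, for illustration only; the driver masks info3 in place:

static inline bool wmi_data_hdr_is_trigger(struct wmi_data_hdr *dhdr)
{
	/* rx direction: station sent a U-APSD trigger frame */
	return !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);
}

static inline void wmi_data_hdr_set_eosp(struct wmi_data_hdr *dhdr)
{
	/* tx direction: last frame of the service period */
	dhdr->info3 |= cpu_to_le16(WMI_DATA_HDR_EOSP);
}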
@@ -257,6 +271,9 @@ static inline u8 wmi_data_hdr_get_if_idx(struct wmi_data_hdr *dhdr)
#define WMI_META_VERSION_1 0x01
#define WMI_META_VERSION_2 0x02
+/* Flag to signal to FW to calculate TCP checksum */
+#define WMI_META_V2_FLAG_CSUM_OFFLOAD 0x01
+
struct wmi_tx_meta_v1 {
/* packet ID to identify the tx request */
u8 pkt_id;
@@ -646,7 +663,6 @@ enum auth_mode {
WPA2_AUTH_CCKM = 0x40,
};
-#define WMI_MIN_KEY_INDEX 0
#define WMI_MAX_KEY_INDEX 3
#define WMI_MAX_KEY_LEN 32
@@ -984,6 +1000,12 @@ struct wmi_listen_int_cmd {
__le16 num_beacons;
} __packed;
+/* WMI_SET_BMISS_TIME_CMDID */
+struct wmi_bmiss_time_cmd {
+ __le16 bmiss_time;
+ __le16 num_beacons;
+};
+
/* WMI_SET_POWER_MODE_CMDID */
enum wmi_power_mode {
REC_POWER = 0x01,
@@ -1001,7 +1023,7 @@ struct wmi_power_mode_cmd {
*/
enum power_save_fail_event_policy {
SEND_POWER_SAVE_FAIL_EVENT_ALWAYS = 1,
- IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN = 2,
+ IGNORE_PS_FAIL_DURING_SCAN = 2,
};
struct wmi_power_params_cmd {
@@ -1199,7 +1221,7 @@ struct wmi_snr_threshold_params_cmd {
enum wmi_preamble_policy {
WMI_IGNORE_BARKER_IN_ERP = 0,
- WMI_DONOT_IGNORE_BARKER_IN_ERP
+ WMI_FOLLOW_BARKER_IN_ERP,
};
struct wmi_set_lpreamble_cmd {
@@ -1237,6 +1259,15 @@ enum target_event_report_config {
NO_DISCONN_EVT_IN_RECONN
};
+struct wmi_mcast_filter_cmd {
+ u8 mcast_all_enable;
+} __packed;
+
+#define ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE 6
+struct wmi_mcast_filter_add_del_cmd {
+ u8 mcast_mac[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE];
+} __packed;
+
/* Command Replies */
/* WMI_GET_CHANNEL_LIST_CMDID reply */
@@ -1335,6 +1366,8 @@ enum wmi_event_id {
WMI_P2P_START_SDPD_EVENTID,
WMI_P2P_SDPD_RX_EVENTID,
+ WMI_SET_HOST_SLEEP_MODE_CMD_PROCESSED_EVENTID = 0x1047,
+
WMI_THIN_RESERVED_START_EVENTID = 0x8000,
/* Events in this range are reserved for thinmode */
WMI_THIN_RESERVED_END_EVENTID = 0x8fff,
@@ -1903,7 +1936,7 @@ struct wow_filter {
struct wmi_set_ip_cmd {
/* IP in network byte order */
- __le32 ips[MAX_IP_ADDRS];
+ __be32 ips[MAX_IP_ADDRS];
} __packed;
enum ath6kl_wow_filters {
@@ -2104,7 +2137,24 @@ struct wmi_rx_frame_format_cmd {
u8 reserved[1];
} __packed;
+struct wmi_ap_hidden_ssid_cmd {
+ u8 hidden_ssid;
+} __packed;
+
/* AP mode events */
+struct wmi_ap_set_apsd_cmd {
+ u8 enable;
+} __packed;
+
+enum wmi_ap_apsd_buffered_traffic_flags {
+ WMI_AP_APSD_NO_DELIVERY_FRAMES = 0x1,
+};
+
+struct wmi_ap_apsd_buffered_traffic_cmd {
+ __le16 aid;
+ __le16 bitmap;
+ __le32 flags;
+} __packed;
/* WMI_PS_POLL_EVENT */
struct wmi_pspoll_event {
@@ -2321,7 +2371,7 @@ enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi);
void ath6kl_wmi_set_control_ep(struct wmi *wmi, enum htc_endpoint_id ep_id);
int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb);
int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
- u8 msg_type, bool more_data,
+ u8 msg_type, u32 flags,
enum wmi_data_hdr_data_type data_type,
u8 meta_ver, void *tx_meta_info, u8 if_idx);
@@ -2376,6 +2426,8 @@ int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
u16 listen_interval,
u16 listen_beacons);
+int ath6kl_wmi_bmisstime_cmd(struct wmi *wmi, u8 if_idx,
+ u16 bmiss_time, u16 num_beacons);
int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode);
int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
u16 ps_poll_num, u16 dtim_policy,
@@ -2417,7 +2469,8 @@ int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
s32 ath6kl_wmi_get_rate(s8 rate_index);
-int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd);
+int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, u8 if_idx,
+ __be32 ips0, __be32 ips1);
int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
enum ath6kl_host_mode host_mode);
int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
@@ -2425,14 +2478,28 @@ int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
u32 filter, u16 host_req_delay);
int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
u8 list_id, u8 filter_size,
- u8 filter_offset, u8 *filter, u8 *mask);
+ u8 filter_offset, const u8 *filter,
+ const u8 *mask);
int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
u16 list_id, u16 filter_id);
int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
+int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on);
+int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
+ u8 *filter, bool add_filter);
+/* AP mode uAPSD */
+int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable);
+int ath6kl_wmi_set_apsd_bfrd_traf(struct wmi *wmi,
+ u8 if_idx, u16 aid,
+ u16 bitmap, u32 flags);
+
+u8 ath6kl_wmi_get_traffic_class(u8 user_priority);
+
+u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri);
/* AP mode */
+int ath6kl_wmi_ap_hidden_ssid(struct wmi *wmi, u8 if_idx, bool enable);
int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
struct wmi_connect_cmd *p);
@@ -2454,9 +2521,6 @@ int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
u32 dur);
-int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
- u32 wait, const u8 *data, u16 data_len);
-
int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
u32 wait, const u8 *data, u16 data_len,
u32 no_cck);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index dc6be4afe8eb..e507e78398f3 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -6,6 +6,14 @@ config ATH9K_DFS_DEBUGFS
def_bool y
depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED
+config ATH9K_BTCOEX_SUPPORT
+ bool "Atheros bluetooth coexistence support"
+ depends on (ATH9K || ATH9K_HTC)
+ default y
+ ---help---
+ Say Y, if you want to use the ath9k/ath9k_htc radios together with
+ Bluetooth modules in the same system.
+
config ATH9K
tristate "Atheros 802.11n wireless cards support"
depends on MAC80211
@@ -73,6 +81,14 @@ config ATH9K_DFS_CERTIFIED
developed. At this point enabling this option won't do anything
except increase code size.
+config ATH9K_MAC_DEBUG
+ bool "Atheros MAC statistics"
+ depends on ATH9K_DEBUGFS
+ default y
+ ---help---
+ This option enables collection of statistics for Rx/Tx status
+ data and some other MAC related statistics
+
config ATH9K_RATE_CONTROL
bool "Atheros ath9k rate control"
depends on ATH9K
@@ -81,14 +97,6 @@ config ATH9K_RATE_CONTROL
Say Y, if you want to use the ath9k specific rate control
module instead of minstrel_ht.
-config ATH9K_BTCOEX_SUPPORT
- bool "Atheros ath9k bluetooth coexistence support"
- depends on ATH9K
- default y
- ---help---
- Say Y, if you want to use the ath9k radios together with
- Bluetooth modules in the same system.
-
config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
depends on USB && MAC80211
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index da02242499af..27d95fe5ade0 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -3,9 +3,9 @@ ath9k-y += beacon.o \
init.o \
main.o \
recv.o \
- xmit.o \
- mci.o \
+ xmit.o
+ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_ATH9K_PCI) += pci.o
ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
@@ -31,14 +31,14 @@ ath9k_hw-y:= \
eeprom_4k.o \
eeprom_9287.o \
ani.o \
- btcoex.o \
mac.o \
ar9002_mac.o \
ar9003_mac.o \
ar9003_eeprom.o \
- ar9003_paprd.o \
- ar9003_mci.o
+ ar9003_paprd.o
+ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
+ ar9003_mci.o
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index bc56f57b393b..7e0ea4e98334 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -407,20 +407,20 @@ static void ath9k_hw_ani_lower_immunity_old(struct ath_hw *ah)
if (aniState->ofdmWeakSigDetectOff) {
if (ath9k_hw_ani_control(ah,
ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- true) == true)
+ true))
return;
}
if (aniState->firstepLevel > 0) {
if (ath9k_hw_ani_control(ah,
ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel - 1) == true)
+ aniState->firstepLevel - 1))
return;
}
} else {
if (aniState->firstepLevel > 0) {
if (ath9k_hw_ani_control(ah,
ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel - 1) == true)
+ aniState->firstepLevel - 1))
return;
}
}
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 86a891f93fc9..d7d8e9199140 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -834,9 +834,10 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
AR_SREV_9287_11_OR_LATER(ah))
REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
- if (AR_SREV_9271_10(ah))
- REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
- modesIndex, regWrites);
+ if (AR_SREV_9271_10(ah)) {
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENA);
+ REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_ADC_ON, 0xa);
+ }
ENABLE_REGWRITE_BUFFER(ah);
@@ -858,21 +859,11 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
REGWRITE_BUFFER_FLUSH(ah);
- if (AR_SREV_9271(ah)) {
- if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == 1)
- REG_WRITE_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
- modesIndex, regWrites);
- else
- REG_WRITE_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
- modesIndex, regWrites);
- }
-
REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
- if (IS_CHAN_A_FAST_CLOCK(ah, chan)) {
- REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ REG_WRITE_ARRAY(&ah->iniModesFastClock, modesIndex,
regWrites);
- }
ar5008_hw_override_ini(ah, chan);
ar5008_hw_set_channel_regs(ah, chan);
diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
index e8bdc75405f1..ea4a230997ac 100644
--- a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
@@ -459,97 +459,6 @@ static const u32 ar5416Common_9100[][2] = {
{0x0000a3e0, 0x000001ce},
};
-static const u32 ar5416Bank0_9100[][2] = {
- /* Addr allmodes */
- {0x000098b0, 0x1e5795e5},
- {0x000098e0, 0x02008020},
-};
-
-static const u32 ar5416BB_RfGain_9100[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
- {0x00009a00, 0x00000000, 0x00000000},
- {0x00009a04, 0x00000040, 0x00000040},
- {0x00009a08, 0x00000080, 0x00000080},
- {0x00009a0c, 0x000001a1, 0x00000141},
- {0x00009a10, 0x000001e1, 0x00000181},
- {0x00009a14, 0x00000021, 0x000001c1},
- {0x00009a18, 0x00000061, 0x00000001},
- {0x00009a1c, 0x00000168, 0x00000041},
- {0x00009a20, 0x000001a8, 0x000001a8},
- {0x00009a24, 0x000001e8, 0x000001e8},
- {0x00009a28, 0x00000028, 0x00000028},
- {0x00009a2c, 0x00000068, 0x00000068},
- {0x00009a30, 0x00000189, 0x000000a8},
- {0x00009a34, 0x000001c9, 0x00000169},
- {0x00009a38, 0x00000009, 0x000001a9},
- {0x00009a3c, 0x00000049, 0x000001e9},
- {0x00009a40, 0x00000089, 0x00000029},
- {0x00009a44, 0x00000170, 0x00000069},
- {0x00009a48, 0x000001b0, 0x00000190},
- {0x00009a4c, 0x000001f0, 0x000001d0},
- {0x00009a50, 0x00000030, 0x00000010},
- {0x00009a54, 0x00000070, 0x00000050},
- {0x00009a58, 0x00000191, 0x00000090},
- {0x00009a5c, 0x000001d1, 0x00000151},
- {0x00009a60, 0x00000011, 0x00000191},
- {0x00009a64, 0x00000051, 0x000001d1},
- {0x00009a68, 0x00000091, 0x00000011},
- {0x00009a6c, 0x000001b8, 0x00000051},
- {0x00009a70, 0x000001f8, 0x00000198},
- {0x00009a74, 0x00000038, 0x000001d8},
- {0x00009a78, 0x00000078, 0x00000018},
- {0x00009a7c, 0x00000199, 0x00000058},
- {0x00009a80, 0x000001d9, 0x00000098},
- {0x00009a84, 0x00000019, 0x00000159},
- {0x00009a88, 0x00000059, 0x00000199},
- {0x00009a8c, 0x00000099, 0x000001d9},
- {0x00009a90, 0x000000d9, 0x00000019},
- {0x00009a94, 0x000000f9, 0x00000059},
- {0x00009a98, 0x000000f9, 0x00000099},
- {0x00009a9c, 0x000000f9, 0x000000d9},
- {0x00009aa0, 0x000000f9, 0x000000f9},
- {0x00009aa4, 0x000000f9, 0x000000f9},
- {0x00009aa8, 0x000000f9, 0x000000f9},
- {0x00009aac, 0x000000f9, 0x000000f9},
- {0x00009ab0, 0x000000f9, 0x000000f9},
- {0x00009ab4, 0x000000f9, 0x000000f9},
- {0x00009ab8, 0x000000f9, 0x000000f9},
- {0x00009abc, 0x000000f9, 0x000000f9},
- {0x00009ac0, 0x000000f9, 0x000000f9},
- {0x00009ac4, 0x000000f9, 0x000000f9},
- {0x00009ac8, 0x000000f9, 0x000000f9},
- {0x00009acc, 0x000000f9, 0x000000f9},
- {0x00009ad0, 0x000000f9, 0x000000f9},
- {0x00009ad4, 0x000000f9, 0x000000f9},
- {0x00009ad8, 0x000000f9, 0x000000f9},
- {0x00009adc, 0x000000f9, 0x000000f9},
- {0x00009ae0, 0x000000f9, 0x000000f9},
- {0x00009ae4, 0x000000f9, 0x000000f9},
- {0x00009ae8, 0x000000f9, 0x000000f9},
- {0x00009aec, 0x000000f9, 0x000000f9},
- {0x00009af0, 0x000000f9, 0x000000f9},
- {0x00009af4, 0x000000f9, 0x000000f9},
- {0x00009af8, 0x000000f9, 0x000000f9},
- {0x00009afc, 0x000000f9, 0x000000f9},
-};
-
-static const u32 ar5416Bank1_9100[][2] = {
- /* Addr allmodes */
- {0x000098b0, 0x02108421},
- {0x000098ec, 0x00000008},
-};
-
-static const u32 ar5416Bank2_9100[][2] = {
- /* Addr allmodes */
- {0x000098b0, 0x0e73ff17},
- {0x000098e0, 0x00000420},
-};
-
-static const u32 ar5416Bank3_9100[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
- {0x000098f0, 0x01400018, 0x01c00018},
-};
-
static const u32 ar5416Bank6_9100[][3] = {
/* Addr 5G_HT20 5G_HT40 */
{0x0000989c, 0x00000000, 0x00000000},
@@ -624,13 +533,6 @@ static const u32 ar5416Bank6TPC_9100[][3] = {
{0x000098d0, 0x0000000f, 0x0010000f},
};
-static const u32 ar5416Bank7_9100[][2] = {
- /* Addr allmodes */
- {0x0000989c, 0x00000500},
- {0x0000989c, 0x00000800},
- {0x000098cc, 0x0000000e},
-};
-
static const u32 ar5416Addac_9100[][2] = {
/* Addr allmodes */
{0x0000989c, 0x00000000},
@@ -1113,178 +1015,6 @@ static const u32 ar5416Common_9160[][2] = {
{0x0000a3e0, 0x000001ce},
};
-static const u32 ar5416Bank0_9160[][2] = {
- /* Addr allmodes */
- {0x000098b0, 0x1e5795e5},
- {0x000098e0, 0x02008020},
-};
-
-static const u32 ar5416BB_RfGain_9160[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
- {0x00009a00, 0x00000000, 0x00000000},
- {0x00009a04, 0x00000040, 0x00000040},
- {0x00009a08, 0x00000080, 0x00000080},
- {0x00009a0c, 0x000001a1, 0x00000141},
- {0x00009a10, 0x000001e1, 0x00000181},
- {0x00009a14, 0x00000021, 0x000001c1},
- {0x00009a18, 0x00000061, 0x00000001},
- {0x00009a1c, 0x00000168, 0x00000041},
- {0x00009a20, 0x000001a8, 0x000001a8},
- {0x00009a24, 0x000001e8, 0x000001e8},
- {0x00009a28, 0x00000028, 0x00000028},
- {0x00009a2c, 0x00000068, 0x00000068},
- {0x00009a30, 0x00000189, 0x000000a8},
- {0x00009a34, 0x000001c9, 0x00000169},
- {0x00009a38, 0x00000009, 0x000001a9},
- {0x00009a3c, 0x00000049, 0x000001e9},
- {0x00009a40, 0x00000089, 0x00000029},
- {0x00009a44, 0x00000170, 0x00000069},
- {0x00009a48, 0x000001b0, 0x00000190},
- {0x00009a4c, 0x000001f0, 0x000001d0},
- {0x00009a50, 0x00000030, 0x00000010},
- {0x00009a54, 0x00000070, 0x00000050},
- {0x00009a58, 0x00000191, 0x00000090},
- {0x00009a5c, 0x000001d1, 0x00000151},
- {0x00009a60, 0x00000011, 0x00000191},
- {0x00009a64, 0x00000051, 0x000001d1},
- {0x00009a68, 0x00000091, 0x00000011},
- {0x00009a6c, 0x000001b8, 0x00000051},
- {0x00009a70, 0x000001f8, 0x00000198},
- {0x00009a74, 0x00000038, 0x000001d8},
- {0x00009a78, 0x00000078, 0x00000018},
- {0x00009a7c, 0x00000199, 0x00000058},
- {0x00009a80, 0x000001d9, 0x00000098},
- {0x00009a84, 0x00000019, 0x00000159},
- {0x00009a88, 0x00000059, 0x00000199},
- {0x00009a8c, 0x00000099, 0x000001d9},
- {0x00009a90, 0x000000d9, 0x00000019},
- {0x00009a94, 0x000000f9, 0x00000059},
- {0x00009a98, 0x000000f9, 0x00000099},
- {0x00009a9c, 0x000000f9, 0x000000d9},
- {0x00009aa0, 0x000000f9, 0x000000f9},
- {0x00009aa4, 0x000000f9, 0x000000f9},
- {0x00009aa8, 0x000000f9, 0x000000f9},
- {0x00009aac, 0x000000f9, 0x000000f9},
- {0x00009ab0, 0x000000f9, 0x000000f9},
- {0x00009ab4, 0x000000f9, 0x000000f9},
- {0x00009ab8, 0x000000f9, 0x000000f9},
- {0x00009abc, 0x000000f9, 0x000000f9},
- {0x00009ac0, 0x000000f9, 0x000000f9},
- {0x00009ac4, 0x000000f9, 0x000000f9},
- {0x00009ac8, 0x000000f9, 0x000000f9},
- {0x00009acc, 0x000000f9, 0x000000f9},
- {0x00009ad0, 0x000000f9, 0x000000f9},
- {0x00009ad4, 0x000000f9, 0x000000f9},
- {0x00009ad8, 0x000000f9, 0x000000f9},
- {0x00009adc, 0x000000f9, 0x000000f9},
- {0x00009ae0, 0x000000f9, 0x000000f9},
- {0x00009ae4, 0x000000f9, 0x000000f9},
- {0x00009ae8, 0x000000f9, 0x000000f9},
- {0x00009aec, 0x000000f9, 0x000000f9},
- {0x00009af0, 0x000000f9, 0x000000f9},
- {0x00009af4, 0x000000f9, 0x000000f9},
- {0x00009af8, 0x000000f9, 0x000000f9},
- {0x00009afc, 0x000000f9, 0x000000f9},
-};
-
-static const u32 ar5416Bank1_9160[][2] = {
- /* Addr allmodes */
- {0x000098b0, 0x02108421},
- {0x000098ec, 0x00000008},
-};
-
-static const u32 ar5416Bank2_9160[][2] = {
- /* Addr allmodes */
- {0x000098b0, 0x0e73ff17},
- {0x000098e0, 0x00000420},
-};
-
-static const u32 ar5416Bank3_9160[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
- {0x000098f0, 0x01400018, 0x01c00018},
-};
-
-static const u32 ar5416Bank6_9160[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00e00000, 0x00e00000},
- {0x0000989c, 0x005e0000, 0x005e0000},
- {0x0000989c, 0x00120000, 0x00120000},
- {0x0000989c, 0x00620000, 0x00620000},
- {0x0000989c, 0x00020000, 0x00020000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x40ff0000, 0x40ff0000},
- {0x0000989c, 0x005f0000, 0x005f0000},
- {0x0000989c, 0x00870000, 0x00870000},
- {0x0000989c, 0x00f90000, 0x00f90000},
- {0x0000989c, 0x007b0000, 0x007b0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00f50000, 0x00f50000},
- {0x0000989c, 0x00dc0000, 0x00dc0000},
- {0x0000989c, 0x00110000, 0x00110000},
- {0x0000989c, 0x006100a8, 0x006100a8},
- {0x0000989c, 0x004210a2, 0x004210a2},
- {0x0000989c, 0x0014008f, 0x0014008f},
- {0x0000989c, 0x00c40003, 0x00c40003},
- {0x0000989c, 0x003000f2, 0x003000f2},
- {0x0000989c, 0x00440016, 0x00440016},
- {0x0000989c, 0x00410040, 0x00410040},
- {0x0000989c, 0x0001805e, 0x0001805e},
- {0x0000989c, 0x0000c0ab, 0x0000c0ab},
- {0x0000989c, 0x000000f1, 0x000000f1},
- {0x0000989c, 0x00002081, 0x00002081},
- {0x0000989c, 0x000000d4, 0x000000d4},
- {0x000098d0, 0x0000000f, 0x0010000f},
-};
-
-static const u32 ar5416Bank6TPC_9160[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00e00000, 0x00e00000},
- {0x0000989c, 0x005e0000, 0x005e0000},
- {0x0000989c, 0x00120000, 0x00120000},
- {0x0000989c, 0x00620000, 0x00620000},
- {0x0000989c, 0x00020000, 0x00020000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x40ff0000, 0x40ff0000},
- {0x0000989c, 0x005f0000, 0x005f0000},
- {0x0000989c, 0x00870000, 0x00870000},
- {0x0000989c, 0x00f90000, 0x00f90000},
- {0x0000989c, 0x007b0000, 0x007b0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00f50000, 0x00f50000},
- {0x0000989c, 0x00dc0000, 0x00dc0000},
- {0x0000989c, 0x00110000, 0x00110000},
- {0x0000989c, 0x006100a8, 0x006100a8},
- {0x0000989c, 0x00423022, 0x00423022},
- {0x0000989c, 0x2014008f, 0x2014008f},
- {0x0000989c, 0x00c40002, 0x00c40002},
- {0x0000989c, 0x003000f2, 0x003000f2},
- {0x0000989c, 0x00440016, 0x00440016},
- {0x0000989c, 0x00410040, 0x00410040},
- {0x0000989c, 0x0001805e, 0x0001805e},
- {0x0000989c, 0x0000c0ab, 0x0000c0ab},
- {0x0000989c, 0x000000e1, 0x000000e1},
- {0x0000989c, 0x00007080, 0x00007080},
- {0x0000989c, 0x000000d4, 0x000000d4},
- {0x000098d0, 0x0000000f, 0x0010000f},
-};
-
-static const u32 ar5416Bank7_9160[][2] = {
- /* Addr allmodes */
- {0x0000989c, 0x00000500},
- {0x0000989c, 0x00000800},
- {0x000098cc, 0x0000000e},
-};
-
static const u32 ar5416Addac_9160[][2] = {
/* Addr allmodes */
{0x0000989c, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index d190411ac8f5..d9a69fc470cd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -34,74 +34,37 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
ARRAY_SIZE(ar9271Modes_9271), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
ARRAY_SIZE(ar9271Common_9271), 2);
- INIT_INI_ARRAY(&ah->iniCommon_normal_cck_fir_coeff_9271,
- ar9271Common_normal_cck_fir_coeff_9271,
- ARRAY_SIZE(ar9271Common_normal_cck_fir_coeff_9271), 2);
- INIT_INI_ARRAY(&ah->iniCommon_japan_2484_cck_fir_coeff_9271,
- ar9271Common_japan_2484_cck_fir_coeff_9271,
- ARRAY_SIZE(ar9271Common_japan_2484_cck_fir_coeff_9271), 2);
- INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
- ar9271Modes_9271_1_0_only,
- ARRAY_SIZE(ar9271Modes_9271_1_0_only), 5);
INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg,
ARRAY_SIZE(ar9271Modes_9271_ANI_reg), 5);
- INIT_INI_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
- ar9271Modes_high_power_tx_gain_9271,
- ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 5);
- INIT_INI_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
- ar9271Modes_normal_power_tx_gain_9271,
- ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 5);
return;
}
+ if (ah->config.pcie_clock_req)
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9280PciePhy_clkreq_off_L1_9280,
+ ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280), 2);
+ else
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9280PciePhy_clkreq_always_on_L1_9280,
+ ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
+
if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
ARRAY_SIZE(ar9287Modes_9287_1_1), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
ARRAY_SIZE(ar9287Common_9287_1_1), 2);
- if (ah->config.pcie_clock_req)
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_off_L1_9287_1_1,
- ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2);
- else
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_always_on_L1_9287_1_1,
- ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1),
- 2);
} else if (AR_SREV_9285_12_OR_LATER(ah)) {
-
-
INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
ARRAY_SIZE(ar9285Modes_9285_1_2), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
ARRAY_SIZE(ar9285Common_9285_1_2), 2);
-
- if (ah->config.pcie_clock_req) {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_off_L1_9285_1_2,
- ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_always_on_L1_9285_1_2,
- ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
- 2);
- }
} else if (AR_SREV_9280_20_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
ARRAY_SIZE(ar9280Modes_9280_2), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
ARRAY_SIZE(ar9280Common_9280_2), 2);
- if (ah->config.pcie_clock_req) {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_off_L1_9280,
- ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_always_on_L1_9280,
- ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
- }
- INIT_INI_ARRAY(&ah->iniModesAdditional,
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9280Modes_fast_clock_9280_2,
ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
} else if (AR_SREV_9160_10_OR_LATER(ah)) {
@@ -109,22 +72,6 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
ARRAY_SIZE(ar5416Modes_9160), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
ARRAY_SIZE(ar5416Common_9160), 2);
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160,
- ARRAY_SIZE(ar5416Bank0_9160), 2);
- INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160,
- ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160,
- ARRAY_SIZE(ar5416Bank1_9160), 2);
- INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160,
- ARRAY_SIZE(ar5416Bank2_9160), 2);
- INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160,
- ARRAY_SIZE(ar5416Bank3_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160,
- ARRAY_SIZE(ar5416Bank6_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160,
- ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160,
- ARRAY_SIZE(ar5416Bank7_9160), 2);
if (AR_SREV_9160_11(ah)) {
INIT_INI_ARRAY(&ah->iniAddac,
ar5416Addac_9160_1_1,
@@ -138,22 +85,8 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
ARRAY_SIZE(ar5416Modes_9100), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
ARRAY_SIZE(ar5416Common_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100,
- ARRAY_SIZE(ar5416Bank0_9100), 2);
- INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100,
- ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100,
- ARRAY_SIZE(ar5416Bank1_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100,
- ARRAY_SIZE(ar5416Bank2_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100,
- ARRAY_SIZE(ar5416Bank3_9100), 3);
INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
ARRAY_SIZE(ar5416Bank6_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
- ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100,
- ARRAY_SIZE(ar5416Bank7_9100), 2);
INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
ARRAY_SIZE(ar5416Addac_9100), 2);
} else {
@@ -161,24 +94,37 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
ARRAY_SIZE(ar5416Modes), 5);
INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
ARRAY_SIZE(ar5416Common), 2);
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
- ARRAY_SIZE(ar5416Bank0), 2);
+ INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
+ ARRAY_SIZE(ar5416Bank6TPC), 3);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
+ ARRAY_SIZE(ar5416Addac), 2);
+ }
+
+ if (!AR_SREV_9280_20_OR_LATER(ah)) {
+ /* Common for AR5416, AR913x, AR9160 */
INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
ARRAY_SIZE(ar5416BB_RfGain), 3);
+
+ INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
+ ARRAY_SIZE(ar5416Bank0), 2);
INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
ARRAY_SIZE(ar5416Bank1), 2);
INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
ARRAY_SIZE(ar5416Bank2), 2);
INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
ARRAY_SIZE(ar5416Bank3), 3);
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
- ARRAY_SIZE(ar5416Bank6), 3);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
- ARRAY_SIZE(ar5416Bank6TPC), 3);
INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
ARRAY_SIZE(ar5416Bank7), 2);
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
- ARRAY_SIZE(ar5416Addac), 2);
+
+ /* Common for AR5416, AR9160 */
+ if (!AR_SREV_9100(ah))
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
+ ARRAY_SIZE(ar5416Bank6), 3);
+
+ /* Common for AR913x, AR9160 */
+ if (!AR_SREV_5416(ah))
+ INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
+ ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
}
/* iniAddac needs to be modified for these chips */
@@ -199,11 +145,6 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
INI_RA(addac, 31,1) = 0;
}
}
-}
-
-/* Support for Japan ch.14 (2484) spread */
-void ar9002_hw_cck_chan14_spread(struct ath_hw *ah)
-{
if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniCckfirNormal,
ar9287Common_normal_cck_fir_coeff_9287_1_1,
@@ -243,14 +184,10 @@ static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
}
}
-static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah)
+static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah, u32 txgain_type)
{
- u32 txgain_type;
-
if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
AR5416_EEP_MINOR_VER_19) {
- txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
-
if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9280Modes_high_power_tx_gain_9280_2,
@@ -266,8 +203,22 @@ static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah)
}
}
+static void ar9271_hw_init_txgain_ini(struct ath_hw *ah, u32 txgain_type)
+{
+ if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9271Modes_high_power_tx_gain_9271,
+ ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 5);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9271Modes_normal_power_tx_gain_9271,
+ ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 5);
+}
+
static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
{
+ u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
+
if (AR_SREV_9287_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9287Modes_rx_gain_9287_1_1,
@@ -275,15 +226,15 @@ static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
else if (AR_SREV_9280_20(ah))
ar9280_20_hw_init_rxgain_ini(ah);
- if (AR_SREV_9287_11_OR_LATER(ah)) {
+ if (AR_SREV_9271(ah)) {
+ ar9271_hw_init_txgain_ini(ah, txgain_type);
+ } else if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9287Modes_tx_gain_9287_1_1,
ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 5);
} else if (AR_SREV_9280_20(ah)) {
- ar9280_20_hw_init_txgain_ini(ah);
+ ar9280_20_hw_init_txgain_ini(ah, txgain_type);
} else if (AR_SREV_9285_12_OR_LATER(ah)) {
- u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
-
/* txgain table */
if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
if (AR_SREV_9285E_20(ah)) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index 863db321070d..4d18c66a6790 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -925,34 +925,6 @@ static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
{0x00004044, 0x00000000},
};
-static const u32 ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
- /* Addr allmodes */
- {0x00004040, 0x9248fd00},
- {0x00004040, 0x24924924},
- {0x00004040, 0xa8000019},
- {0x00004040, 0x13160820},
- {0x00004040, 0xe5980560},
- {0x00004040, 0xc01dcffd},
- {0x00004040, 0x1aaabe41},
- {0x00004040, 0xbe105554},
- {0x00004040, 0x00043007},
- {0x00004044, 0x00000000},
-};
-
-static const u32 ar9285PciePhy_clkreq_off_L1_9285[][2] = {
- /* Addr allmodes */
- {0x00004040, 0x9248fd00},
- {0x00004040, 0x24924924},
- {0x00004040, 0xa8000019},
- {0x00004040, 0x13160820},
- {0x00004040, 0xe5980560},
- {0x00004040, 0xc01dcffc},
- {0x00004040, 0x1aaabe41},
- {0x00004040, 0xbe105554},
- {0x00004040, 0x00043007},
- {0x00004044, 0x00000000},
-};
-
static const u32 ar9285Modes_9285_1_2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
@@ -1743,34 +1715,6 @@ static const u32 ar9285Modes_XE2_0_high_power[][5] = {
{0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
};
-static const u32 ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
- /* Addr allmodes */
- {0x00004040, 0x9248fd00},
- {0x00004040, 0x24924924},
- {0x00004040, 0xa8000019},
- {0x00004040, 0x13160820},
- {0x00004040, 0xe5980560},
- {0x00004040, 0xc01dcffd},
- {0x00004040, 0x1aaabe41},
- {0x00004040, 0xbe105554},
- {0x00004040, 0x00043007},
- {0x00004044, 0x00000000},
-};
-
-static const u32 ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
- /* Addr allmodes */
- {0x00004040, 0x9248fd00},
- {0x00004040, 0x24924924},
- {0x00004040, 0xa8000019},
- {0x00004040, 0x13160820},
- {0x00004040, 0xe5980560},
- {0x00004040, 0xc01dcffc},
- {0x00004040, 0x1aaabe41},
- {0x00004040, 0xbe105554},
- {0x00004040, 0x00043007},
- {0x00004044, 0x00000000},
-};
-
static const u32 ar9287Modes_9287_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160},
@@ -2512,34 +2456,6 @@ static const u32 ar9287Modes_rx_gain_9287_1_1[][5] = {
{0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067},
};
-static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
- /* Addr allmodes */
- {0x00004040, 0x9248fd00},
- {0x00004040, 0x24924924},
- {0x00004040, 0xa8000019},
- {0x00004040, 0x13160820},
- {0x00004040, 0xe5980560},
- {0x00004040, 0xc01dcffd},
- {0x00004040, 0x1aaabe41},
- {0x00004040, 0xbe105554},
- {0x00004040, 0x00043007},
- {0x00004044, 0x00000000},
-};
-
-static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
- /* Addr allmodes */
- {0x00004040, 0x9248fd00},
- {0x00004040, 0x24924924},
- {0x00004040, 0xa8000019},
- {0x00004040, 0x13160820},
- {0x00004040, 0xe5980560},
- {0x00004040, 0xc01dcffc},
- {0x00004040, 0x1aaabe41},
- {0x00004040, 0xbe105554},
- {0x00004040, 0x00043007},
- {0x00004044, 0x00000000},
-};
-
static const u32 ar9271Modes_9271[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
@@ -3176,26 +3092,6 @@ static const u32 ar9271Common_9271[][2] = {
{0x0000d384, 0xf3307ff0},
};
-static const u32 ar9271Common_normal_cck_fir_coeff_9271[][2] = {
- /* Addr allmodes */
- {0x0000a1f4, 0x00fffeff},
- {0x0000a1f8, 0x00f5f9ff},
- {0x0000a1fc, 0xb79f6427},
-};
-
-static const u32 ar9271Common_japan_2484_cck_fir_coeff_9271[][2] = {
- /* Addr allmodes */
- {0x0000a1f4, 0x00000000},
- {0x0000a1f8, 0xefff0301},
- {0x0000a1fc, 0xca9228ee},
-};
-
-static const u32 ar9271Modes_9271_1_0_only[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311},
- {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
-};
-
static const u32 ar9271Modes_9271_ANI_reg[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2},
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 7b6417b5212e..aa2abaf31cba 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -347,15 +347,12 @@ void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
u32 size, u32 flags)
{
struct ar5416_desc *ads = AR5416DESC(ds);
- struct ath9k_hw_capabilities *pCap = &ah->caps;
ads->ds_ctl1 = size & AR_BufLen;
if (flags & ATH9K_RXDESC_INTREQ)
ads->ds_ctl1 |= AR_RxIntrReq;
- ads->ds_rxstatus8 &= ~AR_RxDone;
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
- memset(&(ads->u), 0, sizeof(ads->u));
+ memset(&ads->u.rx, 0, sizeof(ads->u.rx));
}
EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
index 453af6dc514b..f9eb2c357169 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -60,6 +60,8 @@
#define AR_PHY_RF_CTL3 0x9828
#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
+#define AR_PHY_TX_END_TO_ADC_ON 0xFF000000
+#define AR_PHY_TX_END_TO_ADC_ON_S 24
#define AR_PHY_ADC_CTL 0x982C
#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 026f9de15d15..46c79a3d4737 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -295,266 +295,6 @@ static const u32 ar9300_2p2_radio_core[][2] = {
{0x00016bd4, 0x00000000},
};
-static const u32 ar9300Common_rx_gain_table_merlin_2p2[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x02000101},
- {0x0000a004, 0x02000102},
- {0x0000a008, 0x02000103},
- {0x0000a00c, 0x02000104},
- {0x0000a010, 0x02000200},
- {0x0000a014, 0x02000201},
- {0x0000a018, 0x02000202},
- {0x0000a01c, 0x02000203},
- {0x0000a020, 0x02000204},
- {0x0000a024, 0x02000205},
- {0x0000a028, 0x02000208},
- {0x0000a02c, 0x02000302},
- {0x0000a030, 0x02000303},
- {0x0000a034, 0x02000304},
- {0x0000a038, 0x02000400},
- {0x0000a03c, 0x02010300},
- {0x0000a040, 0x02010301},
- {0x0000a044, 0x02010302},
- {0x0000a048, 0x02000500},
- {0x0000a04c, 0x02010400},
- {0x0000a050, 0x02020300},
- {0x0000a054, 0x02020301},
- {0x0000a058, 0x02020302},
- {0x0000a05c, 0x02020303},
- {0x0000a060, 0x02020400},
- {0x0000a064, 0x02030300},
- {0x0000a068, 0x02030301},
- {0x0000a06c, 0x02030302},
- {0x0000a070, 0x02030303},
- {0x0000a074, 0x02030400},
- {0x0000a078, 0x02040300},
- {0x0000a07c, 0x02040301},
- {0x0000a080, 0x02040302},
- {0x0000a084, 0x02040303},
- {0x0000a088, 0x02030500},
- {0x0000a08c, 0x02040400},
- {0x0000a090, 0x02050203},
- {0x0000a094, 0x02050204},
- {0x0000a098, 0x02050205},
- {0x0000a09c, 0x02040500},
- {0x0000a0a0, 0x02050301},
- {0x0000a0a4, 0x02050302},
- {0x0000a0a8, 0x02050303},
- {0x0000a0ac, 0x02050400},
- {0x0000a0b0, 0x02050401},
- {0x0000a0b4, 0x02050402},
- {0x0000a0b8, 0x02050403},
- {0x0000a0bc, 0x02050500},
- {0x0000a0c0, 0x02050501},
- {0x0000a0c4, 0x02050502},
- {0x0000a0c8, 0x02050503},
- {0x0000a0cc, 0x02050504},
- {0x0000a0d0, 0x02050600},
- {0x0000a0d4, 0x02050601},
- {0x0000a0d8, 0x02050602},
- {0x0000a0dc, 0x02050603},
- {0x0000a0e0, 0x02050604},
- {0x0000a0e4, 0x02050700},
- {0x0000a0e8, 0x02050701},
- {0x0000a0ec, 0x02050702},
- {0x0000a0f0, 0x02050703},
- {0x0000a0f4, 0x02050704},
- {0x0000a0f8, 0x02050705},
- {0x0000a0fc, 0x02050708},
- {0x0000a100, 0x02050709},
- {0x0000a104, 0x0205070a},
- {0x0000a108, 0x0205070b},
- {0x0000a10c, 0x0205070c},
- {0x0000a110, 0x0205070d},
- {0x0000a114, 0x02050710},
- {0x0000a118, 0x02050711},
- {0x0000a11c, 0x02050712},
- {0x0000a120, 0x02050713},
- {0x0000a124, 0x02050714},
- {0x0000a128, 0x02050715},
- {0x0000a12c, 0x02050730},
- {0x0000a130, 0x02050731},
- {0x0000a134, 0x02050732},
- {0x0000a138, 0x02050733},
- {0x0000a13c, 0x02050734},
- {0x0000a140, 0x02050735},
- {0x0000a144, 0x02050750},
- {0x0000a148, 0x02050751},
- {0x0000a14c, 0x02050752},
- {0x0000a150, 0x02050753},
- {0x0000a154, 0x02050754},
- {0x0000a158, 0x02050755},
- {0x0000a15c, 0x02050770},
- {0x0000a160, 0x02050771},
- {0x0000a164, 0x02050772},
- {0x0000a168, 0x02050773},
- {0x0000a16c, 0x02050774},
- {0x0000a170, 0x02050775},
- {0x0000a174, 0x00000776},
- {0x0000a178, 0x00000776},
- {0x0000a17c, 0x00000776},
- {0x0000a180, 0x00000776},
- {0x0000a184, 0x00000776},
- {0x0000a188, 0x00000776},
- {0x0000a18c, 0x00000776},
- {0x0000a190, 0x00000776},
- {0x0000a194, 0x00000776},
- {0x0000a198, 0x00000776},
- {0x0000a19c, 0x00000776},
- {0x0000a1a0, 0x00000776},
- {0x0000a1a4, 0x00000776},
- {0x0000a1a8, 0x00000776},
- {0x0000a1ac, 0x00000776},
- {0x0000a1b0, 0x00000776},
- {0x0000a1b4, 0x00000776},
- {0x0000a1b8, 0x00000776},
- {0x0000a1bc, 0x00000776},
- {0x0000a1c0, 0x00000776},
- {0x0000a1c4, 0x00000776},
- {0x0000a1c8, 0x00000776},
- {0x0000a1cc, 0x00000776},
- {0x0000a1d0, 0x00000776},
- {0x0000a1d4, 0x00000776},
- {0x0000a1d8, 0x00000776},
- {0x0000a1dc, 0x00000776},
- {0x0000a1e0, 0x00000776},
- {0x0000a1e4, 0x00000776},
- {0x0000a1e8, 0x00000776},
- {0x0000a1ec, 0x00000776},
- {0x0000a1f0, 0x00000776},
- {0x0000a1f4, 0x00000776},
- {0x0000a1f8, 0x00000776},
- {0x0000a1fc, 0x00000776},
- {0x0000b000, 0x02000101},
- {0x0000b004, 0x02000102},
- {0x0000b008, 0x02000103},
- {0x0000b00c, 0x02000104},
- {0x0000b010, 0x02000200},
- {0x0000b014, 0x02000201},
- {0x0000b018, 0x02000202},
- {0x0000b01c, 0x02000203},
- {0x0000b020, 0x02000204},
- {0x0000b024, 0x02000205},
- {0x0000b028, 0x02000208},
- {0x0000b02c, 0x02000302},
- {0x0000b030, 0x02000303},
- {0x0000b034, 0x02000304},
- {0x0000b038, 0x02000400},
- {0x0000b03c, 0x02010300},
- {0x0000b040, 0x02010301},
- {0x0000b044, 0x02010302},
- {0x0000b048, 0x02000500},
- {0x0000b04c, 0x02010400},
- {0x0000b050, 0x02020300},
- {0x0000b054, 0x02020301},
- {0x0000b058, 0x02020302},
- {0x0000b05c, 0x02020303},
- {0x0000b060, 0x02020400},
- {0x0000b064, 0x02030300},
- {0x0000b068, 0x02030301},
- {0x0000b06c, 0x02030302},
- {0x0000b070, 0x02030303},
- {0x0000b074, 0x02030400},
- {0x0000b078, 0x02040300},
- {0x0000b07c, 0x02040301},
- {0x0000b080, 0x02040302},
- {0x0000b084, 0x02040303},
- {0x0000b088, 0x02030500},
- {0x0000b08c, 0x02040400},
- {0x0000b090, 0x02050203},
- {0x0000b094, 0x02050204},
- {0x0000b098, 0x02050205},
- {0x0000b09c, 0x02040500},
- {0x0000b0a0, 0x02050301},
- {0x0000b0a4, 0x02050302},
- {0x0000b0a8, 0x02050303},
- {0x0000b0ac, 0x02050400},
- {0x0000b0b0, 0x02050401},
- {0x0000b0b4, 0x02050402},
- {0x0000b0b8, 0x02050403},
- {0x0000b0bc, 0x02050500},
- {0x0000b0c0, 0x02050501},
- {0x0000b0c4, 0x02050502},
- {0x0000b0c8, 0x02050503},
- {0x0000b0cc, 0x02050504},
- {0x0000b0d0, 0x02050600},
- {0x0000b0d4, 0x02050601},
- {0x0000b0d8, 0x02050602},
- {0x0000b0dc, 0x02050603},
- {0x0000b0e0, 0x02050604},
- {0x0000b0e4, 0x02050700},
- {0x0000b0e8, 0x02050701},
- {0x0000b0ec, 0x02050702},
- {0x0000b0f0, 0x02050703},
- {0x0000b0f4, 0x02050704},
- {0x0000b0f8, 0x02050705},
- {0x0000b0fc, 0x02050708},
- {0x0000b100, 0x02050709},
- {0x0000b104, 0x0205070a},
- {0x0000b108, 0x0205070b},
- {0x0000b10c, 0x0205070c},
- {0x0000b110, 0x0205070d},
- {0x0000b114, 0x02050710},
- {0x0000b118, 0x02050711},
- {0x0000b11c, 0x02050712},
- {0x0000b120, 0x02050713},
- {0x0000b124, 0x02050714},
- {0x0000b128, 0x02050715},
- {0x0000b12c, 0x02050730},
- {0x0000b130, 0x02050731},
- {0x0000b134, 0x02050732},
- {0x0000b138, 0x02050733},
- {0x0000b13c, 0x02050734},
- {0x0000b140, 0x02050735},
- {0x0000b144, 0x02050750},
- {0x0000b148, 0x02050751},
- {0x0000b14c, 0x02050752},
- {0x0000b150, 0x02050753},
- {0x0000b154, 0x02050754},
- {0x0000b158, 0x02050755},
- {0x0000b15c, 0x02050770},
- {0x0000b160, 0x02050771},
- {0x0000b164, 0x02050772},
- {0x0000b168, 0x02050773},
- {0x0000b16c, 0x02050774},
- {0x0000b170, 0x02050775},
- {0x0000b174, 0x00000776},
- {0x0000b178, 0x00000776},
- {0x0000b17c, 0x00000776},
- {0x0000b180, 0x00000776},
- {0x0000b184, 0x00000776},
- {0x0000b188, 0x00000776},
- {0x0000b18c, 0x00000776},
- {0x0000b190, 0x00000776},
- {0x0000b194, 0x00000776},
- {0x0000b198, 0x00000776},
- {0x0000b19c, 0x00000776},
- {0x0000b1a0, 0x00000776},
- {0x0000b1a4, 0x00000776},
- {0x0000b1a8, 0x00000776},
- {0x0000b1ac, 0x00000776},
- {0x0000b1b0, 0x00000776},
- {0x0000b1b4, 0x00000776},
- {0x0000b1b8, 0x00000776},
- {0x0000b1bc, 0x00000776},
- {0x0000b1c0, 0x00000776},
- {0x0000b1c4, 0x00000776},
- {0x0000b1c8, 0x00000776},
- {0x0000b1cc, 0x00000776},
- {0x0000b1d0, 0x00000776},
- {0x0000b1d4, 0x00000776},
- {0x0000b1d8, 0x00000776},
- {0x0000b1dc, 0x00000776},
- {0x0000b1e0, 0x00000776},
- {0x0000b1e4, 0x00000776},
- {0x0000b1e8, 0x00000776},
- {0x0000b1ec, 0x00000776},
- {0x0000b1f0, 0x00000776},
- {0x0000b1f4, 0x00000776},
- {0x0000b1f8, 0x00000776},
- {0x0000b1fc, 0x00000776},
-};
-
static const u32 ar9300_2p2_mac_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
@@ -572,48 +312,6 @@ static const u32 ar9300_2p2_soc_postamble[][5] = {
{0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
};
-static const u32 ar9200_merlin_2p2_radio_core[][2] = {
- /* Addr allmodes */
- {0x00007800, 0x00040000},
- {0x00007804, 0xdb005012},
- {0x00007808, 0x04924914},
- {0x0000780c, 0x21084210},
- {0x00007810, 0x6d801300},
- {0x00007814, 0x0019beff},
- {0x00007818, 0x07e41000},
- {0x0000781c, 0x00392000},
- {0x00007820, 0x92592480},
- {0x00007824, 0x00040000},
- {0x00007828, 0xdb005012},
- {0x0000782c, 0x04924914},
- {0x00007830, 0x21084210},
- {0x00007834, 0x6d801300},
- {0x00007838, 0x0019beff},
- {0x0000783c, 0x07e40000},
- {0x00007840, 0x00392000},
- {0x00007844, 0x92592480},
- {0x00007848, 0x00100000},
- {0x0000784c, 0x773f0567},
- {0x00007850, 0x54214514},
- {0x00007854, 0x12035828},
- {0x00007858, 0x92592692},
- {0x0000785c, 0x00000000},
- {0x00007860, 0x56400000},
- {0x00007864, 0x0a8e370e},
- {0x00007868, 0xc0102850},
- {0x0000786c, 0x812d4000},
- {0x00007870, 0x807ec400},
- {0x00007874, 0x001b6db0},
- {0x00007878, 0x00376b63},
- {0x0000787c, 0x06db6db6},
- {0x00007880, 0x006d8000},
- {0x00007884, 0xffeffffe},
- {0x00007888, 0xffeffffe},
- {0x0000788c, 0x00010000},
- {0x00007890, 0x02060aeb},
- {0x00007894, 0x5a108000},
-};
-
static const u32 ar9300_2p2_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 8e70f0bc073e..63089cc1fafd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -925,7 +925,6 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = ah->caldata;
- struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
bool txiqcal_done = false, txclcal_done = false;
bool is_reusable = true, status = true;
bool run_rtt_cal = false, run_agc_cal;
@@ -998,30 +997,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
} else if (caldata && !caldata->done_txiqcal_once)
run_agc_cal = true;
- if (mci && IS_CHAN_2GHZ(chan) &&
- (mci_hw->bt_state == MCI_BT_AWAKE) &&
- run_agc_cal &&
- !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
-
- u32 pld[4] = {0, 0, 0, 0};
-
- /* send CAL_REQ only when BT is AWAKE. */
- ath_dbg(common, MCI, "MCI send WLAN_CAL_REQ 0x%x\n",
- mci_hw->wlan_cal_seq);
- MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ);
- pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++;
- ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
-
- /* Wait BT_CAL_GRANT for 50ms */
- ath_dbg(common, MCI, "MCI wait for BT_CAL_GRANT\n");
-
- if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000))
- ath_dbg(common, MCI, "MCI got BT_CAL_GRANT\n");
- else {
- is_reusable = false;
- ath_dbg(common, MCI, "\nMCI BT is not responding\n");
- }
- }
+ if (mci && IS_CHAN_2GHZ(chan) && run_agc_cal)
+ ar9003_mci_init_cal_req(ah, &is_reusable);
txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
@@ -1041,19 +1018,8 @@ skip_tx_iqcal:
0, AH_WAIT_TIMEOUT);
}
- if (mci && IS_CHAN_2GHZ(chan) &&
- (mci_hw->bt_state == MCI_BT_AWAKE) &&
- run_agc_cal &&
- !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
-
- u32 pld[4] = {0, 0, 0, 0};
-
- ath_dbg(common, MCI, "MCI Send WLAN_CAL_DONE 0x%x\n",
- mci_hw->wlan_cal_done);
- MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_DONE);
- pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++;
- ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
- }
+ if (mci && IS_CHAN_2GHZ(chan) && run_agc_cal)
+ ar9003_mci_init_cal_done(ah);
if (rtt && !run_rtt_cal) {
agc_ctrl |= agc_supp_cals;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 9fbcbddea165..6bb4db052bb0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3603,10 +3603,6 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
if (AR_SREV_9462(ah)) {
- if (AR_SREV_9462_10(ah)) {
- value &= ~AR_SWITCH_TABLE_COM_SPDT;
- value |= 0x00100000;
- }
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
AR_SWITCH_TABLE_COM_AR9462_ALL, value);
} else
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index fb937ba93e0c..0f56e322dd3b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -22,7 +22,6 @@
#include "ar9330_1p1_initvals.h"
#include "ar9330_1p2_initvals.h"
#include "ar9580_1p0_initvals.h"
-#include "ar9462_1p0_initvals.h"
#include "ar9462_2p0_initvals.h"
/* General hardware code for the AR9003 hadware family */
@@ -88,11 +87,11 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
/* additional clock settings */
if (ah->is_clk_25mhz)
- INIT_INI_ARRAY(&ah->iniModesAdditional,
+ INIT_INI_ARRAY(&ah->iniAdditional,
ar9331_1p1_xtal_25M,
ARRAY_SIZE(ar9331_1p1_xtal_25M), 2);
else
- INIT_INI_ARRAY(&ah->iniModesAdditional,
+ INIT_INI_ARRAY(&ah->iniAdditional,
ar9331_1p1_xtal_40M,
ARRAY_SIZE(ar9331_1p1_xtal_40M), 2);
} else if (AR_SREV_9330_12(ah)) {
@@ -141,11 +140,11 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
/* additional clock settings */
if (ah->is_clk_25mhz)
- INIT_INI_ARRAY(&ah->iniModesAdditional,
+ INIT_INI_ARRAY(&ah->iniAdditional,
ar9331_1p2_xtal_25M,
ARRAY_SIZE(ar9331_1p2_xtal_25M), 2);
else
- INIT_INI_ARRAY(&ah->iniModesAdditional,
+ INIT_INI_ARRAY(&ah->iniAdditional,
ar9331_1p2_xtal_40M,
ARRAY_SIZE(ar9331_1p2_xtal_40M), 2);
} else if (AR_SREV_9340(ah)) {
@@ -195,15 +194,16 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ARRAY_SIZE(ar9340Modes_high_ob_db_tx_gain_table_1p0),
5);
- INIT_INI_ARRAY(&ah->iniModesAdditional,
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9340Modes_fast_clock_1p0,
ARRAY_SIZE(ar9340Modes_fast_clock_1p0),
3);
- INIT_INI_ARRAY(&ah->iniModesAdditional_40M,
- ar9340_1p0_radio_core_40M,
- ARRAY_SIZE(ar9340_1p0_radio_core_40M),
- 2);
+ if (!ah->is_clk_25mhz)
+ INIT_INI_ARRAY(&ah->iniAdditional,
+ ar9340_1p0_radio_core_40M,
+ ARRAY_SIZE(ar9340_1p0_radio_core_40M),
+ 2);
} else if (AR_SREV_9485_11(ah)) {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
@@ -264,63 +264,6 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ar9485_1_1_pcie_phy_clkreq_disable_L1,
ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
2);
- } else if (AR_SREV_9462_10(ah)) {
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_1p0_mac_core,
- ARRAY_SIZE(ar9462_1p0_mac_core), 2);
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
- ar9462_1p0_mac_postamble,
- ARRAY_SIZE(ar9462_1p0_mac_postamble),
- 5);
-
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
- ar9462_1p0_baseband_core,
- ARRAY_SIZE(ar9462_1p0_baseband_core),
- 2);
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
- ar9462_1p0_baseband_postamble,
- ARRAY_SIZE(ar9462_1p0_baseband_postamble), 5);
-
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
- ar9462_1p0_radio_core,
- ARRAY_SIZE(ar9462_1p0_radio_core), 2);
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
- ar9462_1p0_radio_postamble,
- ARRAY_SIZE(ar9462_1p0_radio_postamble), 5);
-
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
- ar9462_1p0_soc_preamble,
- ARRAY_SIZE(ar9462_1p0_soc_preamble), 2);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
- ar9462_1p0_soc_postamble,
- ARRAY_SIZE(ar9462_1p0_soc_postamble), 5);
-
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_rx_gain_table_1p0,
- ARRAY_SIZE(ar9462_common_rx_gain_table_1p0), 2);
-
- /* Awake -> Sleep Setting */
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9462_pcie_phy_clkreq_disable_L1_1p0,
- ARRAY_SIZE(ar9462_pcie_phy_clkreq_disable_L1_1p0),
- 2);
-
- /* Sleep -> Awake Setting */
- INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9462_pcie_phy_clkreq_disable_L1_1p0,
- ARRAY_SIZE(ar9462_pcie_phy_clkreq_disable_L1_1p0),
- 2);
-
- INIT_INI_ARRAY(&ah->iniModesAdditional,
- ar9462_modes_fast_clock_1p0,
- ARRAY_SIZE(ar9462_modes_fast_clock_1p0), 3);
- INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
- AR9462_BB_CTX_COEFJ(1p0),
- ARRAY_SIZE(AR9462_BB_CTX_COEFJ(1p0)), 2);
-
} else if (AR_SREV_9462_20(ah)) {
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
@@ -379,7 +322,7 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
2);
/* Fast clock modal settings */
- INIT_INI_ARRAY(&ah->iniModesAdditional,
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9462_modes_fast_clock_2p0,
ARRAY_SIZE(ar9462_modes_fast_clock_2p0), 3);
@@ -436,7 +379,7 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ARRAY_SIZE(ar9580_1p0_low_ob_db_tx_gain_table),
5);
- INIT_INI_ARRAY(&ah->iniModesAdditional,
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9580_1p0_modes_fast_clock,
ARRAY_SIZE(ar9580_1p0_modes_fast_clock),
3);
@@ -503,7 +446,7 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
2);
/* Fast clock modal settings */
- INIT_INI_ARRAY(&ah->iniModesAdditional,
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9300Modes_fast_clock_2p2,
ARRAY_SIZE(ar9300Modes_fast_clock_2p2),
3);
@@ -537,11 +480,6 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
ar9580_1p0_lowest_ob_db_tx_gain_table,
ARRAY_SIZE(ar9580_1p0_lowest_ob_db_tx_gain_table),
5);
- else if (AR_SREV_9462_10(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9462_modes_low_ob_db_tx_gain_table_1p0,
- ARRAY_SIZE(ar9462_modes_low_ob_db_tx_gain_table_1p0),
- 5);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9462_modes_low_ob_db_tx_gain_table_2p0,
@@ -581,11 +519,6 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
ar9580_1p0_high_ob_db_tx_gain_table,
ARRAY_SIZE(ar9580_1p0_high_ob_db_tx_gain_table),
5);
- else if (AR_SREV_9462_10(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9462_modes_high_ob_db_tx_gain_table_1p0,
- ARRAY_SIZE(ar9462_modes_high_ob_db_tx_gain_table_1p0),
- 5);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9462_modes_high_ob_db_tx_gain_table_2p0,
@@ -712,11 +645,6 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
ar9580_1p0_rx_gain_table,
ARRAY_SIZE(ar9580_1p0_rx_gain_table),
2);
- else if (AR_SREV_9462_10(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_rx_gain_table_1p0,
- ARRAY_SIZE(ar9462_common_rx_gain_table_1p0),
- 2);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9462_common_rx_gain_table_2p0,
@@ -751,11 +679,6 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
ar9485Common_wo_xlna_rx_gain_1_1,
ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
2);
- else if (AR_SREV_9462_10(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_wo_xlna_rx_gain_table_1p0,
- ARRAY_SIZE(ar9462_common_wo_xlna_rx_gain_table_1p0),
- 2);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9462_common_wo_xlna_rx_gain_table_2p0,
@@ -775,14 +698,10 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
static void ar9003_rx_gain_table_mode2(struct ath_hw *ah)
{
- if (AR_SREV_9462_10(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_mixed_rx_gain_table_1p0,
- ARRAY_SIZE(ar9462_common_mixed_rx_gain_table_1p0), 2);
- else if (AR_SREV_9462_20(ah))
+ if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_mixed_rx_gain_table_2p0,
- ARRAY_SIZE(ar9462_common_mixed_rx_gain_table_2p0), 2);
+ ar9462_common_mixed_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9462_common_mixed_rx_gain_table_2p0), 2);
}
static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 09b8c9dbf78f..a66a13b76848 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -16,6 +16,7 @@
#include <linux/export.h>
#include "hw.h"
#include "ar9003_mac.h"
+#include "ar9003_mci.h"
static void ar9003_hw_rx_enable(struct ath_hw *hw)
{
@@ -28,11 +29,14 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
struct ar9003_txc *ads = ds;
int checksum = 0;
u32 val, ctl12, ctl17;
+ u8 desc_len;
+
+ desc_len = (AR_SREV_9462(ah) ? 0x18 : 0x17);
val = (ATHEROS_VENDOR_ID << AR_DescId_S) |
(1 << AR_TxRxDesc_S) |
(1 << AR_CtrlStat_S) |
- (i->qcu << AR_TxQcuNum_S) | 0x17;
+ (i->qcu << AR_TxQcuNum_S) | desc_len;
checksum += val;
ACCESS_ONCE(ads->info) = val;
@@ -81,6 +85,7 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
ads->ctl20 = 0;
ads->ctl21 = 0;
ads->ctl22 = 0;
+ ads->ctl23 = 0;
ctl17 = SM(i->keytype, AR_EncrType);
if (!i->is_first) {
@@ -176,7 +181,6 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
u32 mask2 = 0;
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 sync_cause = 0, async_cause;
async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
@@ -298,32 +302,8 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
ar9003_hw_bb_watchdog_read(ah);
}
- if (async_cause & AR_INTR_ASYNC_MASK_MCI) {
- u32 raw_intr, rx_msg_intr;
-
- rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
- raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW);
-
- if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef))
- ath_dbg(common, MCI,
- "MCI gets 0xdeadbeef during MCI int processing new raw_intr=0x%08x, new rx_msg_raw=0x%08x, raw_intr=0x%08x, rx_msg_raw=0x%08x\n",
- raw_intr, rx_msg_intr, mci->raw_intr,
- mci->rx_msg_intr);
- else {
- mci->rx_msg_intr |= rx_msg_intr;
- mci->raw_intr |= raw_intr;
- *masked |= ATH9K_INT_MCI;
-
- if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO)
- mci->cont_status =
- REG_READ(ah, AR_MCI_CONT_STATUS);
-
- REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr);
- REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr);
- ath_dbg(common, MCI, "AR_INTR_SYNC_MCI\n");
-
- }
- }
+ if (async_cause & AR_INTR_ASYNC_MASK_MCI)
+ ar9003_mci_get_isr(ah, masked);
if (sync_cause) {
if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
@@ -346,7 +326,6 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
struct ath_tx_status *ts)
{
- struct ar9003_txc *txc = (struct ar9003_txc *) ds;
struct ar9003_txs *ads;
u32 status;
@@ -356,11 +335,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
if ((status & AR_TxDone) == 0)
return -EINPROGRESS;
- ts->qid = MS(ads->ds_info, AR_TxQcuNum);
- if (!txc || (MS(txc->info, AR_TxQcuNum) == ts->qid))
- ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;
- else
- return -ENOENT;
+ ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;
if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
(MS(ads->ds_info, AR_TxRxDesc) != 1)) {
@@ -374,6 +349,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
ts->ts_seqnum = MS(status, AR_SeqNum);
ts->tid = MS(status, AR_TxTid);
+ ts->qid = MS(ads->ds_info, AR_TxQcuNum);
ts->desc_id = MS(ads->status1, AR_TxDescId);
ts->ts_tstamp = ads->status4;
ts->ts_status = 0;
@@ -460,20 +436,14 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
struct ar9003_rxs *rxsp = (struct ar9003_rxs *) buf_addr;
unsigned int phyerr;
- /* TODO: byte swap on big endian for ar9300_10 */
-
- if (!rxs) {
- if ((rxsp->status11 & AR_RxDone) == 0)
- return -EINPROGRESS;
-
- if (MS(rxsp->ds_info, AR_DescId) != 0x168c)
- return -EINVAL;
+ if ((rxsp->status11 & AR_RxDone) == 0)
+ return -EINPROGRESS;
- if ((rxsp->ds_info & (AR_TxRxDesc | AR_CtrlStat)) != 0)
- return -EINPROGRESS;
+ if (MS(rxsp->ds_info, AR_DescId) != 0x168c)
+ return -EINVAL;
- return 0;
- }
+ if ((rxsp->ds_info & (AR_TxRxDesc | AR_CtrlStat)) != 0)
+ return -EINPROGRESS;
rxs->rs_status = 0;
rxs->rs_flags = 0;
@@ -530,7 +500,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
*/
if (rxsp->status11 & AR_CRCErr)
rxs->rs_status |= ATH9K_RXERR_CRC;
- else if (rxsp->status11 & AR_PHYErr) {
+ else if (rxsp->status11 & AR_DecryptCRCErr)
+ rxs->rs_status |= ATH9K_RXERR_DECRYPT;
+ else if (rxsp->status11 & AR_MichaelErr)
+ rxs->rs_status |= ATH9K_RXERR_MIC;
+ if (rxsp->status11 & AR_PHYErr) {
phyerr = MS(rxsp->status11, AR_PHYErrCode);
/*
* If we reach a point here where AR_PostDelimCRCErr is
@@ -552,11 +526,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_status |= ATH9K_RXERR_PHY;
rxs->rs_phyerr = phyerr;
}
-
- } else if (rxsp->status11 & AR_DecryptCRCErr)
- rxs->rs_status |= ATH9K_RXERR_DECRYPT;
- else if (rxsp->status11 & AR_MichaelErr)
- rxs->rs_status |= ATH9K_RXERR_MIC;
+ };
}
if (rxsp->status11 & AR_KeyMiss)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
index e203b51e968b..cbf60b090bd9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -92,7 +92,8 @@ struct ar9003_txc {
u32 ctl20; /* DMA control 20 */
u32 ctl21; /* DMA control 21 */
u32 ctl22; /* DMA control 22 */
- u32 pad[9]; /* pad to cache line (128 bytes/32 dwords) */
+ u32 ctl23; /* DMA control 23 */
+ u32 pad[8]; /* pad to cache line (128 bytes/32 dwords) */
} __packed __aligned(4);
struct ar9003_txs {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 709520c6835b..3cac293a2849 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -16,14 +16,12 @@
#include <linux/export.h>
#include "hw.h"
+#include "hw-ops.h"
#include "ar9003_phy.h"
#include "ar9003_mci.h"
static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah)
{
- if (!AR_SREV_9462_20(ah))
- return;
-
REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 1);
udelay(1);
@@ -37,13 +35,10 @@ static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address,
struct ath_common *common = ath9k_hw_common(ah);
while (time_out) {
-
if (REG_READ(ah, address) & bit_position) {
-
REG_WRITE(ah, address, bit_position);
if (address == AR_MCI_INTERRUPT_RX_MSG_RAW) {
-
if (bit_position &
AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
ar9003_mci_reset_req_wakeup(ah);
@@ -81,25 +76,19 @@ static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address,
return time_out;
}
-void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done)
+static void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done)
{
u32 payload[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00};
- if (!ATH9K_HW_CAP_MCI)
- return;
-
ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, payload, 16,
wait_done, false);
udelay(5);
}
-void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done)
+static void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done)
{
u32 payload = 0x00000000;
- if (!ATH9K_HW_CAP_MCI)
- return;
-
ar9003_mci_send_message(ah, MCI_LNA_TRANS, 0, &payload, 1,
wait_done, false);
}
@@ -111,11 +100,8 @@ static void ar9003_mci_send_req_wake(struct ath_hw *ah, bool wait_done)
udelay(5);
}
-void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done)
+static void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done)
{
- if (!ATH9K_HW_CAP_MCI)
- return;
-
ar9003_mci_send_message(ah, MCI_SYS_WAKING, MCI_FLAG_DISABLE_TIMESTAMP,
NULL, 0, wait_done, false);
}
@@ -138,30 +124,27 @@ static void ar9003_mci_send_sys_sleeping(struct ath_hw *ah, bool wait_done)
static void ar9003_mci_send_coex_version_query(struct ath_hw *ah,
bool wait_done)
{
- struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 payload[4] = {0, 0, 0, 0};
if (!mci->bt_version_known &&
- (mci->bt_state != MCI_BT_SLEEP)) {
- ath_dbg(common, MCI, "MCI Send Coex version query\n");
+ (mci->bt_state != MCI_BT_SLEEP)) {
MCI_GPM_SET_TYPE_OPCODE(payload,
- MCI_GPM_COEX_AGENT, MCI_GPM_COEX_VERSION_QUERY);
+ MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_VERSION_QUERY);
ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
- wait_done, true);
+ wait_done, true);
}
}
static void ar9003_mci_send_coex_version_response(struct ath_hw *ah,
- bool wait_done)
+ bool wait_done)
{
- struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 payload[4] = {0, 0, 0, 0};
- ath_dbg(common, MCI, "MCI Send Coex version response\n");
MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
- MCI_GPM_COEX_VERSION_RESPONSE);
+ MCI_GPM_COEX_VERSION_RESPONSE);
*(((u8 *)payload) + MCI_GPM_COEX_B_MAJOR_VERSION) =
mci->wlan_ver_major;
*(((u8 *)payload) + MCI_GPM_COEX_B_MINOR_VERSION) =
@@ -170,15 +153,16 @@ static void ar9003_mci_send_coex_version_response(struct ath_hw *ah,
}
static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah,
- bool wait_done)
+ bool wait_done)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 *payload = &mci->wlan_channels[0];
if ((mci->wlan_channels_update == true) &&
- (mci->bt_state != MCI_BT_SLEEP)) {
+ (mci->bt_state != MCI_BT_SLEEP)) {
MCI_GPM_SET_TYPE_OPCODE(payload,
- MCI_GPM_COEX_AGENT, MCI_GPM_COEX_WLAN_CHANNELS);
+ MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_WLAN_CHANNELS);
ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
wait_done, true);
MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff);
@@ -188,7 +172,6 @@ static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah,
static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
bool wait_done, u8 query_type)
{
- struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 payload[4] = {0, 0, 0, 0};
bool query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
@@ -196,25 +179,19 @@ static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
if (mci->bt_state != MCI_BT_SLEEP) {
- ath_dbg(common, MCI, "MCI Send Coex BT Status Query 0x%02X\n",
- query_type);
-
- MCI_GPM_SET_TYPE_OPCODE(payload,
- MCI_GPM_COEX_AGENT, MCI_GPM_COEX_STATUS_QUERY);
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_STATUS_QUERY);
*(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type;
+
/*
* If bt_status_query message is not sent successfully,
* then need_flush_btinfo should be set again.
*/
if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
wait_done, true)) {
- if (query_btinfo) {
+ if (query_btinfo)
mci->need_flush_btinfo = true;
-
- ath_dbg(common, MCI,
- "MCI send bt_status_query fail, set flush flag again\n");
- }
}
if (query_btinfo)
@@ -222,21 +199,14 @@ static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
}
}
-void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
- bool wait_done)
+static void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
+ bool wait_done)
{
- struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 payload[4] = {0, 0, 0, 0};
- if (!ATH9K_HW_CAP_MCI)
- return;
-
- ath_dbg(common, MCI, "MCI Send Coex %s BT GPM\n",
- (halt) ? "halt" : "unhalt");
-
- MCI_GPM_SET_TYPE_OPCODE(payload,
- MCI_GPM_COEX_AGENT, MCI_GPM_COEX_HALT_BT_GPM);
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_HALT_BT_GPM);
if (halt) {
mci->query_bt = true;
@@ -252,7 +222,6 @@ void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
}
-
static void ar9003_mci_prep_interface(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -269,30 +238,14 @@ static void ar9003_mci_prep_interface(struct ath_hw *ah)
REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
REG_READ(ah, AR_MCI_INTERRUPT_RAW));
- /* Remote Reset */
- ath_dbg(common, MCI, "MCI Reset sequence start\n");
- ath_dbg(common, MCI, "MCI send REMOTE_RESET\n");
ar9003_mci_remote_reset(ah, true);
-
- /*
- * This delay is required for the reset delay worst case value 255 in
- * MCI_COMMAND2 register
- */
-
- if (AR_SREV_9462_10(ah))
- udelay(252);
-
- ath_dbg(common, MCI, "MCI Send REQ_WAKE to remoter(BT)\n");
ar9003_mci_send_req_wake(ah, true);
if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) {
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) {
- ath_dbg(common, MCI, "MCI SYS_WAKING from remote(BT)\n");
mci->bt_state = MCI_BT_AWAKE;
- if (AR_SREV_9462_10(ah))
- udelay(10);
/*
* we don't need to send more remote_reset at this moment.
	 * If BT receives the first remote_reset, then BT HW will
@@ -309,11 +262,6 @@ static void ar9003_mci_prep_interface(struct ath_hw *ah)
* Similarly, if in any case, WLAN can receive BT's sys_waking,
* that means WLAN's RX is also fine.
*/
-
- /* Send SYS_WAKING to BT */
-
- ath_dbg(common, MCI, "MCI send SW SYS_WAKING to remote BT\n");
-
ar9003_mci_send_sys_waking(ah, true);
udelay(10);
@@ -321,7 +269,6 @@ static void ar9003_mci_prep_interface(struct ath_hw *ah)
* Set BT priority interrupt value to be 0xff to
* avoid having too many BT PRIORITY interrupts.
*/
-
REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
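The wake handshake in ar9003_mci_prep_interface() above leans on ar9003_mci_wait_for_interrupt(), which polls a raw interrupt register until a bit is raised and then acknowledges it by writing the bit back. A stand-alone sketch of that poll-then-acknowledge idiom follows; the "register" is just a variable and all names are illustrative, not the ath9k register map.

/* Stand-alone sketch (not ath9k code) of the poll-then-acknowledge
 * idiom used by ar9003_mci_wait_for_interrupt(): spin until a status
 * bit is raised or the budget runs out, then clear it by writing it
 * back (write-1-to-clear). */
#include <stdbool.h>
#include <stdio.h>

static unsigned int fake_raw_status;	/* stands in for a RAW interrupt reg */

static unsigned int reg_read(void)       { return fake_raw_status; }
static void reg_write(unsigned int bits) { fake_raw_status &= ~bits; } /* W1C */

/* Returns the remaining budget (0 on timeout), like the driver helper. */
static int wait_for_bit(unsigned int bit, int budget)
{
	while (budget > 0) {
		if (reg_read() & bit) {
			reg_write(bit);	/* acknowledge the interrupt */
			break;
		}
		budget--;		/* the driver udelay()s here */
	}
	return budget;
}

int main(void)
{
	fake_raw_status = 0x4;	/* pretend the SYS_WAKING bit is already set */
	printf("remaining budget = %d\n", wait_for_bit(0x4, 500));
	printf("timeout case     = %d\n", wait_for_bit(0x8, 10));
	return 0;
}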
@@ -339,77 +286,70 @@ static void ar9003_mci_prep_interface(struct ath_hw *ah)
REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
AR_MCI_INTERRUPT_BT_PRI);
- if (AR_SREV_9462_10(ah) || mci->is_2g) {
- /* Send LNA_TRANS */
- ath_dbg(common, MCI, "MCI send LNA_TRANS to BT\n");
+ if (mci->is_2g) {
ar9003_mci_send_lna_transfer(ah, true);
udelay(5);
}
- if (AR_SREV_9462_10(ah) || (mci->is_2g &&
- !mci->update_2g5g)) {
+ if ((mci->is_2g && !mci->update_2g5g)) {
if (ar9003_mci_wait_for_interrupt(ah,
- AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
- mci_timeout))
+ AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
+ mci_timeout))
ath_dbg(common, MCI,
"MCI WLAN has control over the LNA & BT obeys it\n");
else
ath_dbg(common, MCI,
"MCI BT didn't respond to LNA_TRANS\n");
}
-
- if (AR_SREV_9462_10(ah)) {
- /* Send another remote_reset to deassert BT clk_req. */
- ath_dbg(common, MCI,
- "MCI another remote_reset to deassert clk_req\n");
- ar9003_mci_remote_reset(ah, true);
- udelay(252);
- }
}
/* Clear the extra redundant SYS_WAKING from BT */
if ((mci->bt_state == MCI_BT_AWAKE) &&
(REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
- (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
-
- REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING);
- REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
- AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+ (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
}
REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
}
-void ar9003_mci_disable_interrupt(struct ath_hw *ah)
+void ar9003_mci_set_full_sleep(struct ath_hw *ah)
{
- if (!ATH9K_HW_CAP_MCI)
- return;
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) &&
+ (mci->bt_state != MCI_BT_SLEEP) &&
+ !mci->halted_bt_gpm) {
+ ar9003_mci_send_coex_halt_bt_gpm(ah, true, true);
+ }
+
+ mci->ready = false;
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+}
+
+static void ar9003_mci_disable_interrupt(struct ath_hw *ah)
+{
REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
}
-void ar9003_mci_enable_interrupt(struct ath_hw *ah)
+static void ar9003_mci_enable_interrupt(struct ath_hw *ah)
{
- if (!ATH9K_HW_CAP_MCI)
- return;
-
REG_WRITE(ah, AR_MCI_INTERRUPT_EN, AR_MCI_INTERRUPT_DEFAULT);
REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
AR_MCI_INTERRUPT_RX_MSG_DEFAULT);
}
-bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints)
+static bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints)
{
u32 intr;
- if (!ATH9K_HW_CAP_MCI)
- return false;
-
intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
return ((intr & ints) == ints);
}
@@ -419,9 +359,6 @@ void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- if (!ATH9K_HW_CAP_MCI)
- return;
-
*raw_intr = mci->raw_intr;
*rx_msg_intr = mci->rx_msg_intr;
@@ -431,12 +368,34 @@ void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
}
EXPORT_SYMBOL(ar9003_mci_get_interrupt);
-void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g)
+void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
{
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 raw_intr, rx_msg_intr;
- if (!ATH9K_HW_CAP_MCI)
- return;
+ rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+ raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW);
+
+ if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef)) {
+ ath_dbg(common, MCI,
+ "MCI gets 0xdeadbeef during int processing\n");
+ } else {
+ mci->rx_msg_intr |= rx_msg_intr;
+ mci->raw_intr |= raw_intr;
+ *masked |= ATH9K_INT_MCI;
+
+ if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO)
+ mci->cont_status = REG_READ(ah, AR_MCI_CONT_STATUS);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr);
+ }
+}
+
+static void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
if (!mci->update_2g5g &&
(mci->is_2g != is_2g))
@@ -447,7 +406,6 @@ void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g)
static bool ar9003_mci_is_gpm_valid(struct ath_hw *ah, u32 msg_index)
{
- struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 *payload;
u32 recv_type, offset;
@@ -460,10 +418,8 @@ static bool ar9003_mci_is_gpm_valid(struct ath_hw *ah, u32 msg_index)
payload = (u32 *)(mci->gpm_buf + offset);
recv_type = MCI_GPM_TYPE(payload);
- if (recv_type == MCI_GPM_RSVD_PATTERN) {
- ath_dbg(common, MCI, "MCI Skip RSVD GPM\n");
+ if (recv_type == MCI_GPM_RSVD_PATTERN)
return false;
- }
return true;
}
@@ -471,42 +427,31 @@ static bool ar9003_mci_is_gpm_valid(struct ath_hw *ah, u32 msg_index)
static void ar9003_mci_observation_set_up(struct ath_hw *ah)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
- ath9k_hw_cfg_output(ah, 3,
- AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
+ if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
+ ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
-
} else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) {
-
ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
-
} else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) {
-
ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
-
} else
return;
REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
- if (AR_SREV_9462_20_OR_LATER(ah)) {
- REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
- AR_GLB_DS_JTAG_DISABLE, 1);
- REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
- AR_GLB_WLAN_UART_INTF_EN, 0);
- REG_SET_BIT(ah, AR_GLB_GPIO_CONTROL,
- ATH_MCI_CONFIG_MCI_OBS_GPIO);
- }
+ REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, AR_GLB_DS_JTAG_DISABLE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, AR_GLB_WLAN_UART_INTF_EN, 0);
+ REG_SET_BIT(ah, AR_GLB_GPIO_CONTROL, ATH_MCI_CONFIG_MCI_OBS_GPIO);
REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0);
REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1);
@@ -520,13 +465,12 @@ static void ar9003_mci_observation_set_up(struct ath_hw *ah)
}
static bool ar9003_mci_send_coex_bt_flags(struct ath_hw *ah, bool wait_done,
- u8 opcode, u32 bt_flags)
+ u8 opcode, u32 bt_flags)
{
- struct ath_common *common = ath9k_hw_common(ah);
u32 pld[4] = {0, 0, 0, 0};
- MCI_GPM_SET_TYPE_OPCODE(pld,
- MCI_GPM_COEX_AGENT, MCI_GPM_COEX_BT_UPDATE_FLAGS);
+ MCI_GPM_SET_TYPE_OPCODE(pld, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_BT_UPDATE_FLAGS);
*(((u8 *)pld) + MCI_GPM_COEX_B_BT_FLAGS_OP) = opcode;
*(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 0) = bt_flags & 0xFF;
@@ -534,32 +478,360 @@ static bool ar9003_mci_send_coex_bt_flags(struct ath_hw *ah, bool wait_done,
*(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 2) = (bt_flags >> 16) & 0xFF;
*(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 3) = (bt_flags >> 24) & 0xFF;
- ath_dbg(common, MCI,
- "MCI BT_MCI_FLAGS: Send Coex BT Update Flags %s 0x%08x\n",
- opcode == MCI_GPM_COEX_BT_FLAGS_READ ? "READ" :
- opcode == MCI_GPM_COEX_BT_FLAGS_SET ? "SET" : "CLEAR",
- bt_flags);
-
return ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16,
- wait_done, true);
+ wait_done, true);
}
-void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
- bool is_full_sleep)
+static void ar9003_mci_sync_bt_state(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 cur_bt_state;
+
+ cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL);
+
+ if (mci->bt_state != cur_bt_state)
+ mci->bt_state = cur_bt_state;
+
+ if (mci->bt_state != MCI_BT_SLEEP) {
+
+ ar9003_mci_send_coex_version_query(ah, true);
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+
+ if (mci->unhalt_bt_gpm == true)
+ ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+ }
+}
+
+void ar9003_mci_check_bt(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+
+ if (!mci_hw->ready)
+ return;
+
+ /*
+	 * Check the BT state again to make
+	 * sure it hasn't changed.
+ */
+ ar9003_mci_sync_bt_state(ah);
+ ar9003_mci_2g5g_switch(ah, true);
+
+ if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
+ (mci_hw->query_bt == true)) {
+ mci_hw->need_flush_btinfo = true;
+ }
+}
+
+static void ar9003_mci_process_gpm_extra(struct ath_hw *ah, u8 gpm_type,
+ u8 gpm_opcode, u32 *p_gpm)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- u32 regval, thresh;
+ u8 *p_data = (u8 *) p_gpm;
- if (!ATH9K_HW_CAP_MCI)
+ if (gpm_type != MCI_GPM_COEX_AGENT)
return;
- ath_dbg(common, MCI, "MCI full_sleep = %d, is_2g = %d\n",
- is_full_sleep, is_2g);
+ switch (gpm_opcode) {
+ case MCI_GPM_COEX_VERSION_QUERY:
+ ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n");
+ ar9003_mci_send_coex_version_response(ah, true);
+ break;
+ case MCI_GPM_COEX_VERSION_RESPONSE:
+ ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n");
+ mci->bt_ver_major =
+ *(p_data + MCI_GPM_COEX_B_MAJOR_VERSION);
+ mci->bt_ver_minor =
+ *(p_data + MCI_GPM_COEX_B_MINOR_VERSION);
+ mci->bt_version_known = true;
+ ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n",
+ mci->bt_ver_major, mci->bt_ver_minor);
+ break;
+ case MCI_GPM_COEX_STATUS_QUERY:
+ ath_dbg(common, MCI,
+ "MCI Recv GPM COEX Status Query = 0x%02X\n",
+ *(p_data + MCI_GPM_COEX_B_WLAN_BITMAP));
+ mci->wlan_channels_update = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+ break;
+ case MCI_GPM_COEX_BT_PROFILE_INFO:
+ mci->query_bt = true;
+ ath_dbg(common, MCI, "MCI Recv GPM COEX BT_Profile_Info\n");
+ break;
+ case MCI_GPM_COEX_BT_STATUS_UPDATE:
+ mci->query_bt = true;
+ ath_dbg(common, MCI,
+ "MCI Recv GPM COEX BT_Status_Update SEQ=%d (drop&query)\n",
+ *(p_gpm + 3));
+ break;
+ default:
+ break;
+ }
+}
+
+static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
+ u8 gpm_opcode, int time_out)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 *p_gpm = NULL, mismatch = 0, more_data;
+ u32 offset;
+ u8 recv_type = 0, recv_opcode = 0;
+ bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE);
+
+ more_data = time_out ? MCI_GPM_NOMORE : MCI_GPM_MORE;
+
+ while (time_out > 0) {
+ if (p_gpm) {
+ MCI_GPM_RECYCLE(p_gpm);
+ p_gpm = NULL;
+ }
+
+ if (more_data != MCI_GPM_MORE)
+ time_out = ar9003_mci_wait_for_interrupt(ah,
+ AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_GPM,
+ time_out);
+
+ if (!time_out)
+ break;
+
+ offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
+ &more_data);
+
+ if (offset == MCI_GPM_INVALID)
+ continue;
+
+ p_gpm = (u32 *) (mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(p_gpm);
+ recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+ if (MCI_GPM_IS_CAL_TYPE(recv_type)) {
+ if (recv_type == gpm_type) {
+ if ((gpm_type == MCI_GPM_BT_CAL_DONE) &&
+ !b_is_bt_cal_done) {
+ gpm_type = MCI_GPM_BT_CAL_GRANT;
+ continue;
+ }
+ break;
+ }
+ } else if ((recv_type == gpm_type) && (recv_opcode == gpm_opcode)) {
+ break;
+ }
+
+		/*
+		 * Check if it's cal_grant.
+		 *
+		 * When we're waiting for cal_grant in the reset routine,
+		 * it's possible that BT sends out cal_request at the
+		 * same time. Since BT's calibration doesn't happen
+		 * that often, we let BT complete its calibration and
+		 * then continue waiting for cal_grant from BT.
+		 * Original: wait for BT_CAL_GRANT.
+		 * New: receive BT_CAL_REQ -> send WLAN_CAL_GRANT -> wait
+		 * for BT_CAL_DONE -> wait for BT_CAL_GRANT.
+		 * (A stand-alone sketch of this handshake follows this
+		 * function.)
+		 */
+
+ if ((gpm_type == MCI_GPM_BT_CAL_GRANT) &&
+ (recv_type == MCI_GPM_BT_CAL_REQ)) {
+
+ u32 payload[4] = {0, 0, 0, 0};
+
+ gpm_type = MCI_GPM_BT_CAL_DONE;
+ MCI_GPM_SET_CAL_TYPE(payload,
+ MCI_GPM_WLAN_CAL_GRANT);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ false, false);
+ continue;
+ } else {
+ ath_dbg(common, MCI, "MCI GPM subtype not match 0x%x\n",
+ *(p_gpm + 1));
+ mismatch++;
+ ar9003_mci_process_gpm_extra(ah, recv_type,
+ recv_opcode, p_gpm);
+ }
+ }
+
+ if (p_gpm) {
+ MCI_GPM_RECYCLE(p_gpm);
+ p_gpm = NULL;
+ }
+
+ if (time_out <= 0)
+ time_out = 0;
+
+ while (more_data == MCI_GPM_MORE) {
+ offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
+ &more_data);
+ if (offset == MCI_GPM_INVALID)
+ break;
+
+ p_gpm = (u32 *) (mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(p_gpm);
+ recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+ if (!MCI_GPM_IS_CAL_TYPE(recv_type))
+ ar9003_mci_process_gpm_extra(ah, recv_type,
+ recv_opcode, p_gpm);
+
+ MCI_GPM_RECYCLE(p_gpm);
+ }
+
+ return time_out;
+}
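The comment inside ar9003_mci_wait_for_gpm() above describes how the wait loop is retargeted when BT starts its own calibration while WLAN is still waiting for BT_CAL_GRANT. The following stand-alone C sketch models only that retargeting decision; it does not touch the ath9k GPM or register APIs, and every name in it is illustrative.

/* Stand-alone sketch (not ath9k code): models the retargeting done by
 * ar9003_mci_wait_for_gpm() when BT requests calibration while WLAN is
 * waiting for BT_CAL_GRANT. */
#include <stdbool.h>
#include <stdio.h>

enum gpm_subtype { BT_CAL_REQ, BT_CAL_GRANT, BT_CAL_DONE };

/* Given the message just received, decide what to wait for next. */
static enum gpm_subtype next_wait_target(enum gpm_subtype waiting_for,
					 enum gpm_subtype received,
					 bool *send_wlan_cal_grant)
{
	*send_wlan_cal_grant = false;

	/* BT asked to calibrate first: grant it and wait for it to finish. */
	if (waiting_for == BT_CAL_GRANT && received == BT_CAL_REQ) {
		*send_wlan_cal_grant = true;
		return BT_CAL_DONE;
	}

	/* BT finished: go back to waiting for our own grant. */
	if (waiting_for == BT_CAL_DONE && received == BT_CAL_DONE)
		return BT_CAL_GRANT;

	return waiting_for;	/* unexpected message: keep waiting */
}

int main(void)
{
	bool grant;
	enum gpm_subtype target = BT_CAL_GRANT;

	target = next_wait_target(target, BT_CAL_REQ, &grant);
	printf("send grant=%d, now waiting for %d\n", grant, target);
	target = next_wait_target(target, BT_CAL_DONE, &grant);
	printf("send grant=%d, now waiting for %d\n", grant, target);
	return 0;
}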
+
+bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ ar9003_mci_2g5g_changed(ah, IS_CHAN_2GHZ(chan));
+
+ if (mci_hw->bt_state != MCI_BT_CAL_START)
+ return false;
+
+ mci_hw->bt_state = MCI_BT_CAL;
+
+ /*
+	 * MCI FIX: disable the MCI interrupt here. This avoids the
+	 * SW_MSG_DONE or RX_MSG bits triggering MCI_INT and causing
+	 * mci_intr reentry.
+ */
+ ar9003_mci_disable_interrupt(ah);
+
+ MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload,
+ 16, true, false);
+
+	/* Wait up to 25 ms for BT calibration to complete */
+
+ if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_DONE,
+ 0, 25000))
+ ath_dbg(common, MCI, "MCI BT_CAL_DONE received\n");
+ else
+ ath_dbg(common, MCI,
+ "MCI BT_CAL_DONE not received\n");
+
+ mci_hw->bt_state = MCI_BT_AWAKE;
+	/* MCI FIX: re-enable the MCI interrupt here */
+ ar9003_mci_enable_interrupt(ah);
+
+ return true;
+}
+
+int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
+ struct ath9k_hw_cal_data *caldata)
+{
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+
+ if (!mci_hw->ready)
+ return 0;
+
+ if (!IS_CHAN_2GHZ(chan) || (mci_hw->bt_state != MCI_BT_SLEEP))
+ goto exit;
+
+ if (ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) ||
+ ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) {
+
+ /*
+		 * BT is sleeping. Check whether BT woke up during
+		 * WLAN calibration; if it did, we need to go through
+		 * all the message exchanges again and recalibrate.
+		 * (A small sketch of this recovery path follows this
+		 * function.)
+ */
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE);
+
+ ar9003_mci_remote_reset(ah, true);
+ ar9003_mci_send_sys_waking(ah, true);
+ udelay(1);
+
+ if (IS_CHAN_2GHZ(chan))
+ ar9003_mci_send_lna_transfer(ah, true);
+
+ mci_hw->bt_state = MCI_BT_AWAKE;
+
+ if (caldata) {
+ caldata->done_txiqcal_once = false;
+ caldata->done_txclcal_once = false;
+ caldata->rtt_hist.num_readings = 0;
+ }
+
+ if (!ath9k_hw_init_cal(ah, chan))
+ return -EIO;
+
+ }
+exit:
+ ar9003_mci_enable_interrupt(ah);
+ return 0;
+}
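The recovery path in ar9003_mci_end_reset() above invalidates the cached TX IQ/CL calibration flags and the RTT history before re-running ath9k_hw_init_cal() when BT woke up mid-calibration. Below is a hedged, stand-alone sketch of the same "invalidate cached results, then redo the work" pattern; the struct and function names are invented for the example and no ath9k types are used.

/* Stand-alone sketch: redo calibration when a concurrent event (here,
 * "BT woke up") may have invalidated the cached results. */
#include <stdbool.h>
#include <stdio.h>

struct cal_cache {
	bool txiq_done;
	bool txcl_done;
	int rtt_readings;
};

static bool run_calibration(struct cal_cache *c)
{
	c->txiq_done = true;
	c->txcl_done = true;
	return true;		/* pretend calibration succeeded */
}

static int end_reset(struct cal_cache *c, bool bt_woke_during_cal)
{
	if (!bt_woke_during_cal)
		return 0;	/* cached results are still valid */

	/* Throw away everything derived while the environment changed. */
	c->txiq_done = false;
	c->txcl_done = false;
	c->rtt_readings = 0;

	return run_calibration(c) ? 0 : -5 /* -EIO */;
}

int main(void)
{
	struct cal_cache c = { true, true, 3 };

	printf("end_reset -> %d (txiq_done=%d, rtt=%d)\n",
	       end_reset(&c, true), c.txiq_done, c.rtt_readings);
	return 0;
}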
+
+static void ar9003_mci_mute_bt(struct ath_hw *ah)
+{
+ /* disable all MCI messages */
+ REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
+ REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+	/* wait for pending HW messages to flush out */
+ udelay(10);
/*
- * GPM buffer and scheduling message buffer are not allocated
+	 * Send LNA_TAKE and SYS_SLEEPING when
+	 * 1. this reset is not one done after resuming from full sleep, and
+	 * 2. before resetting MCI RX, to quiet BT and avoid MCI RX misalignment.
*/
+ ar9003_mci_send_lna_take(ah, true);
+
+ udelay(5);
+
+ ar9003_mci_send_sys_sleeping(ah, true);
+}
+
+static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 thresh;
+
+ if (enable) {
+ REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+ AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
+ REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+ AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
+
+ if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
+ thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_AGGR_THRESH, thresh);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 1);
+ } else {
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 0);
+ }
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
+ } else {
+ REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ }
+}
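ar9003_mci_osla_setup() above pulls the aggregation-threshold field out of mci->config with the driver's MS() mask/shift helper before programming AR_BTCOEX_CTRL. The sketch below shows that mask/shift extraction and the matching SM()-style insertion in a self-contained program; the field layout (mask and shift values) is invented purely for illustration.

/* Stand-alone sketch of the MS()/SM()-style bitfield helpers the code
 * above uses to read ATH_MCI_CONFIG_AGGR_THRESH out of mci->config.
 * The mask and shift values here are made up for the example. */
#include <stdio.h>

#define EXAMPLE_AGGR_THRESH	0x00000380	/* 3-bit field, bits 7..9 */
#define EXAMPLE_AGGR_THRESH_S	7

/* MS(): extract a field; SM(): place a value into a field. */
#define MS(_v, _f)	(((_v) & _f) >> _f##_S)
#define SM(_v, _f)	(((_v) << _f##_S) & _f)

int main(void)
{
	unsigned int config = SM(5, EXAMPLE_AGGR_THRESH) | 0x1;

	printf("config      = 0x%08x\n", config);
	printf("aggr thresh = %u\n", MS(config, EXAMPLE_AGGR_THRESH));
	return 0;
}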
+
+void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+ bool is_full_sleep)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 regval;
+
+ ath_dbg(common, MCI, "MCI Reset (full_sleep = %d, is_2g = %d)\n",
+ is_full_sleep, is_2g);
if (!mci->gpm_addr && !mci->sched_addr) {
ath_dbg(common, MCI,
@@ -568,7 +840,7 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
}
if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
- ath_dbg(common, MCI, "MCI it's deadbeef, quit mci_reset\n");
+ ath_dbg(common, MCI, "BTCOEX control register is dead\n");
return;
}
@@ -592,49 +864,23 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
- if (is_2g && (AR_SREV_9462_20(ah)) &&
- !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) {
-
- regval |= SM(1, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
- ath_dbg(common, MCI, "MCI sched one step look ahead\n");
-
- if (!(mci->config &
- ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
-
- thresh = MS(mci->config,
- ATH_MCI_CONFIG_AGGR_THRESH);
- thresh &= 7;
- regval |= SM(1,
- AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN);
- regval |= SM(thresh, AR_BTCOEX_CTRL_AGGR_THRESH);
-
- REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
- AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
- REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
- AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
-
- } else
- ath_dbg(common, MCI, "MCI sched aggr thresh: off\n");
- } else
- ath_dbg(common, MCI, "MCI SCHED one step look ahead off\n");
-
- if (AR_SREV_9462_10(ah))
- regval |= SM(1, AR_BTCOEX_CTRL_SPDT_ENABLE_10);
-
REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
- if (AR_SREV_9462_20(ah)) {
- REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
- AR_BTCOEX_CTRL_SPDT_ENABLE);
- REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
- AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);
- }
+ if (is_2g && !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
+ ar9003_mci_osla_setup(ah, true);
+ else
+ ar9003_mci_osla_setup(ah, false);
+
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_SPDT_ENABLE);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
+ AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);
REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 1);
REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
- thresh = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
- REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, thresh);
+ regval = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
+ REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, regval);
REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN);
/* Resetting the Rx and Tx paths of MCI */
@@ -659,15 +905,15 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
REG_WRITE(ah, AR_MCI_COMMAND2, regval);
ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+
REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
(SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM)));
REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
- AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
- if (AR_SREV_9462_20_OR_LATER(ah))
- ar9003_mci_observation_set_up(ah);
+ ar9003_mci_observation_set_up(ah);
mci->ready = true;
ar9003_mci_prep_interface(ah);
@@ -676,79 +922,28 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
ar9003_mci_enable_interrupt(ah);
}
-void ar9003_mci_mute_bt(struct ath_hw *ah)
-{
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (!ATH9K_HW_CAP_MCI)
- return;
-
- /* disable all MCI messages */
- REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
- REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
- REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
- REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
- REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
- REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
-
- /* wait pending HW messages to flush out */
- udelay(10);
-
- /*
- * Send LNA_TAKE and SYS_SLEEPING when
- * 1. reset not after resuming from full sleep
- * 2. before reset MCI RX, to quiet BT and avoid MCI RX misalignment
- */
-
- ath_dbg(common, MCI, "MCI Send LNA take\n");
- ar9003_mci_send_lna_take(ah, true);
-
- udelay(5);
-
- ath_dbg(common, MCI, "MCI Send sys sleeping\n");
- ar9003_mci_send_sys_sleeping(ah, true);
-}
-
-void ar9003_mci_sync_bt_state(struct ath_hw *ah)
+void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep)
{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- u32 cur_bt_state;
-
- if (!ATH9K_HW_CAP_MCI)
- return;
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
- cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL);
+ ar9003_mci_disable_interrupt(ah);
- if (mci->bt_state != cur_bt_state) {
- ath_dbg(common, MCI,
- "MCI BT state mismatches. old: %d, new: %d\n",
- mci->bt_state, cur_bt_state);
- mci->bt_state = cur_bt_state;
+ if (mci_hw->ready && !save_fullsleep) {
+ ar9003_mci_mute_bt(ah);
+ udelay(20);
+ REG_WRITE(ah, AR_BTCOEX_CTRL, 0);
}
- if (mci->bt_state != MCI_BT_SLEEP) {
-
- ar9003_mci_send_coex_version_query(ah, true);
- ar9003_mci_send_coex_wlan_channels(ah, true);
-
- if (mci->unhalt_bt_gpm == true) {
- ath_dbg(common, MCI, "MCI unhalt BT GPM\n");
- ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
- }
- }
+ mci_hw->bt_state = MCI_BT_SLEEP;
+ mci_hw->ready = false;
}
static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
{
- struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 new_flags, to_set, to_clear;
- if (AR_SREV_9462_20(ah) &&
- mci->update_2g5g &&
- (mci->bt_state != MCI_BT_SLEEP)) {
-
+ if (mci->update_2g5g && (mci->bt_state != MCI_BT_SLEEP)) {
if (mci->is_2g) {
new_flags = MCI_2G_FLAGS;
to_clear = MCI_2G_FLAGS_CLEAR_MASK;
@@ -759,44 +954,23 @@ static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
to_set = MCI_5G_FLAGS_SET_MASK;
}
- ath_dbg(common, MCI,
- "MCI BT_MCI_FLAGS: %s 0x%08x clr=0x%08x, set=0x%08x\n",
- mci->is_2g ? "2G" : "5G", new_flags, to_clear, to_set);
-
if (to_clear)
ar9003_mci_send_coex_bt_flags(ah, wait_done,
- MCI_GPM_COEX_BT_FLAGS_CLEAR, to_clear);
-
+ MCI_GPM_COEX_BT_FLAGS_CLEAR,
+ to_clear);
if (to_set)
ar9003_mci_send_coex_bt_flags(ah, wait_done,
- MCI_GPM_COEX_BT_FLAGS_SET, to_set);
+ MCI_GPM_COEX_BT_FLAGS_SET,
+ to_set);
}
-
- if (AR_SREV_9462_10(ah) && (mci->bt_state != MCI_BT_SLEEP))
- mci->update_2g5g = false;
}
static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
u32 *payload, bool queue)
{
- struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u8 type, opcode;
- if (queue) {
-
- if (payload)
- ath_dbg(common, MCI,
- "MCI ERROR: Send fail: %02x: %02x %02x %02x\n",
- header,
- *(((u8 *)payload) + 4),
- *(((u8 *)payload) + 5),
- *(((u8 *)payload) + 6));
- else
- ath_dbg(common, MCI, "MCI ERROR: Send fail: %02x\n",
- header);
- }
-
/* check if the message is to be queued */
if (header != MCI_GPM)
return;
@@ -809,64 +983,29 @@ static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
switch (opcode) {
case MCI_GPM_COEX_BT_UPDATE_FLAGS:
-
- if (AR_SREV_9462_10(ah))
- break;
-
if (*(((u8 *)payload) + MCI_GPM_COEX_B_BT_FLAGS_OP) ==
- MCI_GPM_COEX_BT_FLAGS_READ)
+ MCI_GPM_COEX_BT_FLAGS_READ)
break;
mci->update_2g5g = queue;
- if (queue)
- ath_dbg(common, MCI,
- "MCI BT_MCI_FLAGS: 2G5G status <queued> %s\n",
- mci->is_2g ? "2G" : "5G");
- else
- ath_dbg(common, MCI,
- "MCI BT_MCI_FLAGS: 2G5G status <sent> %s\n",
- mci->is_2g ? "2G" : "5G");
-
break;
-
case MCI_GPM_COEX_WLAN_CHANNELS:
-
mci->wlan_channels_update = queue;
- if (queue)
- ath_dbg(common, MCI, "MCI WLAN channel map <queued>\n");
- else
- ath_dbg(common, MCI, "MCI WLAN channel map <sent>\n");
break;
-
case MCI_GPM_COEX_HALT_BT_GPM:
-
if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
- MCI_GPM_COEX_BT_GPM_UNHALT) {
-
+ MCI_GPM_COEX_BT_GPM_UNHALT) {
mci->unhalt_bt_gpm = queue;
- if (queue)
- ath_dbg(common, MCI,
- "MCI UNHALT BT GPM <queued>\n");
- else {
+ if (!queue)
mci->halted_bt_gpm = false;
- ath_dbg(common, MCI,
- "MCI UNHALT BT GPM <sent>\n");
- }
}
if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
MCI_GPM_COEX_BT_GPM_HALT) {
mci->halted_bt_gpm = !queue;
-
- if (queue)
- ath_dbg(common, MCI,
- "MCI HALT BT GPM <not sent>\n");
- else
- ath_dbg(common, MCI,
- "MCI UNHALT BT GPM <sent>\n");
}
break;
@@ -877,46 +1016,33 @@ static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done)
{
- struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- if (!ATH9K_HW_CAP_MCI)
- return;
-
if (mci->update_2g5g) {
if (mci->is_2g) {
-
ar9003_mci_send_2g5g_status(ah, true);
- ath_dbg(common, MCI, "MCI Send LNA trans\n");
ar9003_mci_send_lna_transfer(ah, true);
udelay(5);
REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+ REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
- if (AR_SREV_9462_20(ah)) {
- REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
- AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
- if (!(mci->config &
- ATH_MCI_CONFIG_DISABLE_OSLA)) {
- REG_SET_BIT(ah, AR_BTCOEX_CTRL,
- AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
- }
+ if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) {
+ REG_SET_BIT(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
}
} else {
- ath_dbg(common, MCI, "MCI Send LNA take\n");
ar9003_mci_send_lna_take(ah, true);
udelay(5);
REG_SET_BIT(ah, AR_MCI_TX_CTRL,
AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
-
- if (AR_SREV_9462_20(ah)) {
- REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
- AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
- REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
- AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
- }
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+ REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
ar9003_mci_send_2g5g_status(ah, true);
}
@@ -934,28 +1060,19 @@ bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
u32 saved_mci_int_en;
int i;
- if (!ATH9K_HW_CAP_MCI)
- return false;
-
saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
regval = REG_READ(ah, AR_BTCOEX_CTRL);
if ((regval == 0xdeadbeef) || !(regval & AR_BTCOEX_CTRL_MCI_MODE_EN)) {
-
ath_dbg(common, MCI,
"MCI Not sending 0x%x. MCI is not enabled. full_sleep = %d\n",
- header,
- (ah->power_mode == ATH9K_PM_FULL_SLEEP) ? 1 : 0);
-
+ header, (ah->power_mode == ATH9K_PM_FULL_SLEEP) ? 1 : 0);
ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
return false;
-
} else if (check_bt && (mci->bt_state == MCI_BT_SLEEP)) {
-
ath_dbg(common, MCI,
"MCI Don't send message 0x%x. BT is in sleep state\n",
header);
-
ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
return false;
}
@@ -983,7 +1100,7 @@ bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
if (wait_done &&
!(ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RAW,
- AR_MCI_INTERRUPT_SW_MSG_DONE, 500)))
+ AR_MCI_INTERRUPT_SW_MSG_DONE, 500)))
ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
else {
ar9003_mci_queue_unsent_gpm(ah, header, payload, false);
@@ -997,220 +1114,64 @@ bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
}
EXPORT_SYMBOL(ar9003_mci_send_message);
-void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
- u16 len, u32 sched_addr)
+void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable)
{
- struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- void *sched_buf = (void *)((char *) gpm_buf + (sched_addr - gpm_addr));
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+ u32 pld[4] = {0, 0, 0, 0};
- if (!ATH9K_HW_CAP_MCI)
+ if ((mci_hw->bt_state != MCI_BT_AWAKE) ||
+ (mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL))
return;
- mci->gpm_addr = gpm_addr;
- mci->gpm_buf = gpm_buf;
- mci->gpm_len = len;
- mci->sched_addr = sched_addr;
- mci->sched_buf = sched_buf;
+ MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ);
+ pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++;
- ar9003_mci_reset(ah, true, true, true);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+
+ if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000)) {
+ ath_dbg(common, MCI, "MCI BT_CAL_GRANT received\n");
+ } else {
+		*is_reusable = false;
+ ath_dbg(common, MCI, "MCI BT_CAL_GRANT not received\n");
+ }
}
-EXPORT_SYMBOL(ar9003_mci_setup);
-void ar9003_mci_cleanup(struct ath_hw *ah)
+void ar9003_mci_init_cal_done(struct ath_hw *ah)
{
- struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+ u32 pld[4] = {0, 0, 0, 0};
- if (!ATH9K_HW_CAP_MCI)
+ if ((mci_hw->bt_state != MCI_BT_AWAKE) ||
+ (mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL))
return;
- /* Turn off MCI and Jupiter mode. */
- REG_WRITE(ah, AR_BTCOEX_CTRL, 0x00);
- ath_dbg(common, MCI, "MCI ar9003_mci_cleanup\n");
- ar9003_mci_disable_interrupt(ah);
+ MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_DONE);
+ pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++;
+ ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
}
-EXPORT_SYMBOL(ar9003_mci_cleanup);
-static void ar9003_mci_process_gpm_extra(struct ath_hw *ah, u8 gpm_type,
- u8 gpm_opcode, u32 *p_gpm)
+void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+ u16 len, u32 sched_addr)
{
- struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- u8 *p_data = (u8 *) p_gpm;
- if (gpm_type != MCI_GPM_COEX_AGENT)
- return;
+ mci->gpm_addr = gpm_addr;
+ mci->gpm_buf = gpm_buf;
+ mci->gpm_len = len;
+ mci->sched_addr = sched_addr;
- switch (gpm_opcode) {
- case MCI_GPM_COEX_VERSION_QUERY:
- ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n");
- ar9003_mci_send_coex_version_response(ah, true);
- break;
- case MCI_GPM_COEX_VERSION_RESPONSE:
- ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n");
- mci->bt_ver_major =
- *(p_data + MCI_GPM_COEX_B_MAJOR_VERSION);
- mci->bt_ver_minor =
- *(p_data + MCI_GPM_COEX_B_MINOR_VERSION);
- mci->bt_version_known = true;
- ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n",
- mci->bt_ver_major, mci->bt_ver_minor);
- break;
- case MCI_GPM_COEX_STATUS_QUERY:
- ath_dbg(common, MCI,
- "MCI Recv GPM COEX Status Query = 0x%02X\n",
- *(p_data + MCI_GPM_COEX_B_WLAN_BITMAP));
- mci->wlan_channels_update = true;
- ar9003_mci_send_coex_wlan_channels(ah, true);
- break;
- case MCI_GPM_COEX_BT_PROFILE_INFO:
- mci->query_bt = true;
- ath_dbg(common, MCI, "MCI Recv GPM COEX BT_Profile_Info\n");
- break;
- case MCI_GPM_COEX_BT_STATUS_UPDATE:
- mci->query_bt = true;
- ath_dbg(common, MCI,
- "MCI Recv GPM COEX BT_Status_Update SEQ=%d (drop&query)\n",
- *(p_gpm + 3));
- break;
- default:
- break;
- }
+ ar9003_mci_reset(ah, true, true, true);
}
+EXPORT_SYMBOL(ar9003_mci_setup);
-u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
- u8 gpm_opcode, int time_out)
+void ar9003_mci_cleanup(struct ath_hw *ah)
{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- u32 *p_gpm = NULL, mismatch = 0, more_data;
- u32 offset;
- u8 recv_type = 0, recv_opcode = 0;
- bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE);
-
- if (!ATH9K_HW_CAP_MCI)
- return 0;
-
- more_data = time_out ? MCI_GPM_NOMORE : MCI_GPM_MORE;
-
- while (time_out > 0) {
- if (p_gpm) {
- MCI_GPM_RECYCLE(p_gpm);
- p_gpm = NULL;
- }
-
- if (more_data != MCI_GPM_MORE)
- time_out = ar9003_mci_wait_for_interrupt(ah,
- AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_GPM,
- time_out);
-
- if (!time_out)
- break;
-
- offset = ar9003_mci_state(ah,
- MCI_STATE_NEXT_GPM_OFFSET, &more_data);
-
- if (offset == MCI_GPM_INVALID)
- continue;
-
- p_gpm = (u32 *) (mci->gpm_buf + offset);
- recv_type = MCI_GPM_TYPE(p_gpm);
- recv_opcode = MCI_GPM_OPCODE(p_gpm);
-
- if (MCI_GPM_IS_CAL_TYPE(recv_type)) {
-
- if (recv_type == gpm_type) {
-
- if ((gpm_type == MCI_GPM_BT_CAL_DONE) &&
- !b_is_bt_cal_done) {
- gpm_type = MCI_GPM_BT_CAL_GRANT;
- ath_dbg(common, MCI,
- "MCI Recv BT_CAL_DONE wait BT_CAL_GRANT\n");
- continue;
- }
-
- break;
- }
- } else if ((recv_type == gpm_type) &&
- (recv_opcode == gpm_opcode))
- break;
-
- /* not expected message */
-
- /*
- * check if it's cal_grant
- *
- * When we're waiting for cal_grant in reset routine,
- * it's possible that BT sends out cal_request at the
- * same time. Since BT's calibration doesn't happen
- * that often, we'll let BT completes calibration then
- * we continue to wait for cal_grant from BT.
- * Orginal: Wait BT_CAL_GRANT.
- * New: Receive BT_CAL_REQ -> send WLAN_CAL_GRANT->wait
- * BT_CAL_DONE -> Wait BT_CAL_GRANT.
- */
-
- if ((gpm_type == MCI_GPM_BT_CAL_GRANT) &&
- (recv_type == MCI_GPM_BT_CAL_REQ)) {
-
- u32 payload[4] = {0, 0, 0, 0};
-
- gpm_type = MCI_GPM_BT_CAL_DONE;
- ath_dbg(common, MCI,
- "MCI Rcv BT_CAL_REQ, send WLAN_CAL_GRANT\n");
-
- MCI_GPM_SET_CAL_TYPE(payload,
- MCI_GPM_WLAN_CAL_GRANT);
-
- ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
- false, false);
-
- ath_dbg(common, MCI, "MCI now wait for BT_CAL_DONE\n");
-
- continue;
- } else {
- ath_dbg(common, MCI, "MCI GPM subtype not match 0x%x\n",
- *(p_gpm + 1));
- mismatch++;
- ar9003_mci_process_gpm_extra(ah, recv_type,
- recv_opcode, p_gpm);
- }
- }
- if (p_gpm) {
- MCI_GPM_RECYCLE(p_gpm);
- p_gpm = NULL;
- }
-
- if (time_out <= 0) {
- time_out = 0;
- ath_dbg(common, MCI,
- "MCI GPM received timeout, mismatch = %d\n", mismatch);
- } else
- ath_dbg(common, MCI, "MCI Receive GPM type=0x%x, code=0x%x\n",
- gpm_type, gpm_opcode);
-
- while (more_data == MCI_GPM_MORE) {
-
- ath_dbg(common, MCI, "MCI discard remaining GPM\n");
- offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
- &more_data);
-
- if (offset == MCI_GPM_INVALID)
- break;
-
- p_gpm = (u32 *) (mci->gpm_buf + offset);
- recv_type = MCI_GPM_TYPE(p_gpm);
- recv_opcode = MCI_GPM_OPCODE(p_gpm);
-
- if (!MCI_GPM_IS_CAL_TYPE(recv_type))
- ar9003_mci_process_gpm_extra(ah, recv_type,
- recv_opcode, p_gpm);
-
- MCI_GPM_RECYCLE(p_gpm);
- }
-
- return time_out;
+ /* Turn off MCI and Jupiter mode. */
+ REG_WRITE(ah, AR_BTCOEX_CTRL, 0x00);
+ ar9003_mci_disable_interrupt(ah);
}
+EXPORT_SYMBOL(ar9003_mci_cleanup);
u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
{
@@ -1219,13 +1180,9 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
u32 value = 0, more_gpm = 0, gpm_ptr;
u8 query_type;
- if (!ATH9K_HW_CAP_MCI)
- return 0;
-
switch (state_type) {
case MCI_STATE_ENABLE:
if (mci->ready) {
-
value = REG_READ(ah, AR_BTCOEX_CTRL);
if ((value == 0xdeadbeef) || (value == 0xffffffff))
@@ -1235,7 +1192,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
break;
case MCI_STATE_INIT_GPM_OFFSET:
value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
- ath_dbg(common, MCI, "MCI GPM initial WRITE_PTR=%d\n", value);
mci->gpm_idx = value;
break;
case MCI_STATE_NEXT_GPM_OFFSET:
@@ -1258,32 +1214,21 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
if (value == 0)
value = mci->gpm_len - 1;
else if (value >= mci->gpm_len) {
- if (value != 0xFFFF) {
+ if (value != 0xFFFF)
value = 0;
- ath_dbg(common, MCI,
- "MCI GPM offset out of range\n");
- }
- } else
+ } else {
value--;
+ }
if (value == 0xFFFF) {
value = MCI_GPM_INVALID;
more_gpm = MCI_GPM_NOMORE;
- ath_dbg(common, MCI,
- "MCI GPM ptr invalid @ptr=%d, offset=%d, more=GPM_NOMORE\n",
- gpm_ptr, value);
} else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) {
-
if (gpm_ptr == mci->gpm_idx) {
value = MCI_GPM_INVALID;
more_gpm = MCI_GPM_NOMORE;
-
- ath_dbg(common, MCI,
- "MCI GPM message not available @ptr=%d, @offset=%d, more=GPM_NOMORE\n",
- gpm_ptr, value);
} else {
for (;;) {
-
u32 temp_index;
/* skip reserved GPM if any */
@@ -1300,13 +1245,8 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
mci->gpm_len)
mci->gpm_idx = 0;
- ath_dbg(common, MCI,
- "MCI GPM message got ptr=%d, @offset=%d, more=%d\n",
- gpm_ptr, temp_index,
- (more_gpm == MCI_GPM_MORE));
-
if (ar9003_mci_is_gpm_valid(ah,
- temp_index)) {
+ temp_index)) {
value = temp_index;
break;
}
@@ -1331,79 +1271,59 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
/* Make it in bytes */
value <<= 4;
break;
-
case MCI_STATE_REMOTE_SLEEP:
value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
AR_MCI_RX_REMOTE_SLEEP) ?
MCI_BT_SLEEP : MCI_BT_AWAKE;
break;
-
case MCI_STATE_CONT_RSSI_POWER:
value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER);
- break;
-
+ break;
case MCI_STATE_CONT_PRIORITY:
value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY);
break;
-
case MCI_STATE_CONT_TXRX:
value = MS(mci->cont_status, AR_MCI_CONT_TXRX);
break;
-
case MCI_STATE_BT:
value = mci->bt_state;
break;
-
case MCI_STATE_SET_BT_SLEEP:
mci->bt_state = MCI_BT_SLEEP;
break;
-
case MCI_STATE_SET_BT_AWAKE:
mci->bt_state = MCI_BT_AWAKE;
ar9003_mci_send_coex_version_query(ah, true);
ar9003_mci_send_coex_wlan_channels(ah, true);
- if (mci->unhalt_bt_gpm) {
-
- ath_dbg(common, MCI, "MCI unhalt BT GPM\n");
+ if (mci->unhalt_bt_gpm)
ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
- }
ar9003_mci_2g5g_switch(ah, true);
break;
-
case MCI_STATE_SET_BT_CAL_START:
mci->bt_state = MCI_BT_CAL_START;
break;
-
case MCI_STATE_SET_BT_CAL:
mci->bt_state = MCI_BT_CAL;
break;
-
case MCI_STATE_RESET_REQ_WAKE:
ar9003_mci_reset_req_wakeup(ah);
mci->update_2g5g = true;
- if ((AR_SREV_9462_20_OR_LATER(ah)) &&
- (mci->config & ATH_MCI_CONFIG_MCI_OBS_MASK)) {
+ if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MASK) {
/* Check if we still have control of the GPIOs */
if ((REG_READ(ah, AR_GLB_GPIO_CONTROL) &
- ATH_MCI_CONFIG_MCI_OBS_GPIO) !=
- ATH_MCI_CONFIG_MCI_OBS_GPIO) {
-
- ath_dbg(common, MCI,
- "MCI reconfigure observation\n");
+ ATH_MCI_CONFIG_MCI_OBS_GPIO) !=
+ ATH_MCI_CONFIG_MCI_OBS_GPIO) {
ar9003_mci_observation_set_up(ah);
}
}
break;
-
case MCI_STATE_SEND_WLAN_COEX_VERSION:
ar9003_mci_send_coex_version_response(ah, true);
break;
-
case MCI_STATE_SET_BT_COEX_VERSION:
-
if (!p_data)
ath_dbg(common, MCI,
"MCI Set BT Coex version with NULL data!!\n");
@@ -1415,7 +1335,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
mci->bt_ver_major, mci->bt_ver_minor);
}
break;
-
case MCI_STATE_SEND_WLAN_CHANNELS:
if (p_data) {
if (((mci->wlan_channels[1] & 0xffff0000) ==
@@ -1432,19 +1351,13 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
mci->wlan_channels_update = true;
ar9003_mci_send_coex_wlan_channels(ah, true);
break;
-
case MCI_STATE_SEND_VERSION_QUERY:
ar9003_mci_send_coex_version_query(ah, true);
break;
-
case MCI_STATE_SEND_STATUS_QUERY:
- query_type = (AR_SREV_9462_10(ah)) ?
- MCI_GPM_COEX_QUERY_BT_ALL_INFO :
- MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
-
+ query_type = MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
break;
-
case MCI_STATE_NEED_FLUSH_BT_INFO:
/*
* btcoex_hw.mci.unhalt_bt_gpm means whether it's
@@ -1464,28 +1377,21 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
mci->need_flush_btinfo =
(*p_data != 0) ? true : false;
break;
-
case MCI_STATE_RECOVER_RX:
-
- ath_dbg(common, MCI, "MCI hw RECOVER_RX\n");
ar9003_mci_prep_interface(ah);
mci->query_bt = true;
mci->need_flush_btinfo = true;
ar9003_mci_send_coex_wlan_channels(ah, true);
ar9003_mci_2g5g_switch(ah, true);
break;
-
case MCI_STATE_NEED_FTP_STOMP:
value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
break;
-
case MCI_STATE_NEED_TUNING:
value = !(mci->config & ATH_MCI_CONFIG_DISABLE_TUNING);
break;
-
default:
break;
-
}
return value;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
index 798da116a44c..4842f6c06b8c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -99,4 +99,237 @@ enum mci_gpm_coex_bt_update_flags_op {
ATH_MCI_CONFIG_MCI_OBS_BT)
#define ATH_MCI_CONFIG_MCI_OBS_GPIO 0x0000002F
+enum mci_message_header { /* length of payload */
+ MCI_LNA_CTRL = 0x10, /* len = 0 */
+ MCI_CONT_NACK = 0x20, /* len = 0 */
+ MCI_CONT_INFO = 0x30, /* len = 4 */
+ MCI_CONT_RST = 0x40, /* len = 0 */
+ MCI_SCHD_INFO = 0x50, /* len = 16 */
+ MCI_CPU_INT = 0x60, /* len = 4 */
+ MCI_SYS_WAKING = 0x70, /* len = 0 */
+ MCI_GPM = 0x80, /* len = 16 */
+ MCI_LNA_INFO = 0x90, /* len = 1 */
+ MCI_LNA_STATE = 0x94,
+ MCI_LNA_TAKE = 0x98,
+ MCI_LNA_TRANS = 0x9c,
+ MCI_SYS_SLEEPING = 0xa0, /* len = 0 */
+ MCI_REQ_WAKE = 0xc0, /* len = 0 */
+ MCI_DEBUG_16 = 0xfe, /* len = 2 */
+ MCI_REMOTE_RESET = 0xff /* len = 16 */
+};
+
+enum ath_mci_gpm_coex_profile_type {
+ MCI_GPM_COEX_PROFILE_UNKNOWN,
+ MCI_GPM_COEX_PROFILE_RFCOMM,
+ MCI_GPM_COEX_PROFILE_A2DP,
+ MCI_GPM_COEX_PROFILE_HID,
+ MCI_GPM_COEX_PROFILE_BNEP,
+ MCI_GPM_COEX_PROFILE_VOICE,
+ MCI_GPM_COEX_PROFILE_MAX
+};
+
+/* MCI GPM/Coex opcode/type definitions */
+enum {
+ MCI_GPM_COEX_W_GPM_PAYLOAD = 1,
+ MCI_GPM_COEX_B_GPM_TYPE = 4,
+ MCI_GPM_COEX_B_GPM_OPCODE = 5,
+ /* MCI_GPM_WLAN_CAL_REQ, MCI_GPM_WLAN_CAL_DONE */
+ MCI_GPM_WLAN_CAL_W_SEQUENCE = 2,
+
+ /* MCI_GPM_COEX_VERSION_QUERY */
+ /* MCI_GPM_COEX_VERSION_RESPONSE */
+ MCI_GPM_COEX_B_MAJOR_VERSION = 6,
+ MCI_GPM_COEX_B_MINOR_VERSION = 7,
+ /* MCI_GPM_COEX_STATUS_QUERY */
+ MCI_GPM_COEX_B_BT_BITMAP = 6,
+ MCI_GPM_COEX_B_WLAN_BITMAP = 7,
+ /* MCI_GPM_COEX_HALT_BT_GPM */
+ MCI_GPM_COEX_B_HALT_STATE = 6,
+ /* MCI_GPM_COEX_WLAN_CHANNELS */
+ MCI_GPM_COEX_B_CHANNEL_MAP = 6,
+ /* MCI_GPM_COEX_BT_PROFILE_INFO */
+ MCI_GPM_COEX_B_PROFILE_TYPE = 6,
+ MCI_GPM_COEX_B_PROFILE_LINKID = 7,
+ MCI_GPM_COEX_B_PROFILE_STATE = 8,
+ MCI_GPM_COEX_B_PROFILE_ROLE = 9,
+ MCI_GPM_COEX_B_PROFILE_RATE = 10,
+ MCI_GPM_COEX_B_PROFILE_VOTYPE = 11,
+ MCI_GPM_COEX_H_PROFILE_T = 12,
+ MCI_GPM_COEX_B_PROFILE_W = 14,
+ MCI_GPM_COEX_B_PROFILE_A = 15,
+ /* MCI_GPM_COEX_BT_STATUS_UPDATE */
+ MCI_GPM_COEX_B_STATUS_TYPE = 6,
+ MCI_GPM_COEX_B_STATUS_LINKID = 7,
+ MCI_GPM_COEX_B_STATUS_STATE = 8,
+ /* MCI_GPM_COEX_BT_UPDATE_FLAGS */
+ MCI_GPM_COEX_W_BT_FLAGS = 6,
+ MCI_GPM_COEX_B_BT_FLAGS_OP = 10
+};
+
+enum mci_gpm_subtype {
+ MCI_GPM_BT_CAL_REQ = 0,
+ MCI_GPM_BT_CAL_GRANT = 1,
+ MCI_GPM_BT_CAL_DONE = 2,
+ MCI_GPM_WLAN_CAL_REQ = 3,
+ MCI_GPM_WLAN_CAL_GRANT = 4,
+ MCI_GPM_WLAN_CAL_DONE = 5,
+ MCI_GPM_COEX_AGENT = 0x0c,
+ MCI_GPM_RSVD_PATTERN = 0xfe,
+ MCI_GPM_RSVD_PATTERN32 = 0xfefefefe,
+ MCI_GPM_BT_DEBUG = 0xff
+};
+
+enum mci_bt_state {
+ MCI_BT_SLEEP,
+ MCI_BT_AWAKE,
+ MCI_BT_CAL_START,
+ MCI_BT_CAL
+};
+
+/* Type of state query */
+enum mci_state_type {
+ MCI_STATE_ENABLE,
+ MCI_STATE_INIT_GPM_OFFSET,
+ MCI_STATE_NEXT_GPM_OFFSET,
+ MCI_STATE_LAST_GPM_OFFSET,
+ MCI_STATE_BT,
+ MCI_STATE_SET_BT_SLEEP,
+ MCI_STATE_SET_BT_AWAKE,
+ MCI_STATE_SET_BT_CAL_START,
+ MCI_STATE_SET_BT_CAL,
+ MCI_STATE_LAST_SCHD_MSG_OFFSET,
+ MCI_STATE_REMOTE_SLEEP,
+ MCI_STATE_CONT_RSSI_POWER,
+ MCI_STATE_CONT_PRIORITY,
+ MCI_STATE_CONT_TXRX,
+ MCI_STATE_RESET_REQ_WAKE,
+ MCI_STATE_SEND_WLAN_COEX_VERSION,
+ MCI_STATE_SET_BT_COEX_VERSION,
+ MCI_STATE_SEND_WLAN_CHANNELS,
+ MCI_STATE_SEND_VERSION_QUERY,
+ MCI_STATE_SEND_STATUS_QUERY,
+ MCI_STATE_NEED_FLUSH_BT_INFO,
+ MCI_STATE_SET_CONCUR_TX_PRI,
+ MCI_STATE_RECOVER_RX,
+ MCI_STATE_NEED_FTP_STOMP,
+ MCI_STATE_NEED_TUNING,
+ MCI_STATE_DEBUG,
+ MCI_STATE_MAX
+};
+
+enum mci_gpm_coex_opcode {
+ MCI_GPM_COEX_VERSION_QUERY,
+ MCI_GPM_COEX_VERSION_RESPONSE,
+ MCI_GPM_COEX_STATUS_QUERY,
+ MCI_GPM_COEX_HALT_BT_GPM,
+ MCI_GPM_COEX_WLAN_CHANNELS,
+ MCI_GPM_COEX_BT_PROFILE_INFO,
+ MCI_GPM_COEX_BT_STATUS_UPDATE,
+ MCI_GPM_COEX_BT_UPDATE_FLAGS
+};
+
+#define MCI_GPM_NOMORE 0
+#define MCI_GPM_MORE 1
+#define MCI_GPM_INVALID 0xffffffff
+
+#define MCI_GPM_RECYCLE(_p_gpm) do { \
+ *(((u32 *)_p_gpm) + MCI_GPM_COEX_W_GPM_PAYLOAD) = \
+ MCI_GPM_RSVD_PATTERN32; \
+} while (0)
+
+#define MCI_GPM_TYPE(_p_gpm) \
+ (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) & 0xff)
+
+#define MCI_GPM_OPCODE(_p_gpm) \
+ (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) & 0xff)
+
+#define MCI_GPM_SET_CAL_TYPE(_p_gpm, _cal_type) do { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_cal_type) & 0xff;\
+} while (0)
+
+#define MCI_GPM_SET_TYPE_OPCODE(_p_gpm, _type, _opcode) do { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_type) & 0xff; \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) = (_opcode) & 0xff;\
+} while (0)
+
+#define MCI_GPM_IS_CAL_TYPE(_type) ((_type) <= MCI_GPM_WLAN_CAL_DONE)
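The GPM helper macros added above treat each 16-byte GPM message as a byte array, with the type and opcode stored at fixed byte offsets. The stand-alone sketch below builds a payload the way MCI_GPM_SET_TYPE_OPCODE() does and reads it back through MCI_GPM_TYPE()/MCI_GPM_OPCODE(); the macros are re-declared locally under shortened names so the snippet compiles outside the driver.

/* Stand-alone sketch: build a GPM payload and read it back with the
 * byte-offset macros defined above (re-declared here so this compiles
 * on its own). */
#include <stdio.h>
#include <stdint.h>

enum { B_GPM_TYPE = 4, B_GPM_OPCODE = 5 };	/* byte offsets, as above */

#define GPM_SET_TYPE_OPCODE(_p, _type, _op) do {		\
	((uint8_t *)(_p))[B_GPM_TYPE]   = (_type) & 0xff;	\
	((uint8_t *)(_p))[B_GPM_OPCODE] = (_op) & 0xff;		\
} while (0)

#define GPM_TYPE(_p)	(((uint8_t *)(_p))[B_GPM_TYPE] & 0xff)
#define GPM_OPCODE(_p)	(((uint8_t *)(_p))[B_GPM_OPCODE] & 0xff)

int main(void)
{
	uint32_t payload[4] = { 0, 0, 0, 0 };	/* 16-byte GPM message */

	/* e.g. MCI_GPM_COEX_AGENT (0x0c) / MCI_GPM_COEX_VERSION_QUERY (0) */
	GPM_SET_TYPE_OPCODE(payload, 0x0c, 0x00);

	printf("type=0x%02x opcode=0x%02x\n",
	       GPM_TYPE(payload), GPM_OPCODE(payload));
	return 0;
}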
+
+/*
+ * Functions that are available to the MCI driver core.
+ */
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+ u32 *payload, u8 len, bool wait_done,
+ bool check_bt);
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data);
+void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+ u16 len, u32 sched_addr);
+void ar9003_mci_cleanup(struct ath_hw *ah);
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+ u32 *rx_msg_intr);
+
+/*
+ * These functions are used by ath9k_hw.
+ */
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static inline bool ar9003_mci_is_ready(struct ath_hw *ah)
+{
+ return ah->btcoex_hw.mci.ready;
+}
+void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep);
+void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable);
+void ar9003_mci_init_cal_done(struct ath_hw *ah);
+void ar9003_mci_set_full_sleep(struct ath_hw *ah);
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_check_bt(struct ath_hw *ah);
+bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan);
+int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
+ struct ath9k_hw_cal_data *caldata);
+void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+ bool is_full_sleep);
+void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked);
+
+#else
+
+static inline bool ar9003_mci_is_ready(struct ath_hw *ah)
+{
+ return false;
+}
+static inline void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep)
+{
+}
+static inline void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable)
+{
+}
+static inline void ar9003_mci_init_cal_done(struct ath_hw *ah)
+{
+}
+static inline void ar9003_mci_set_full_sleep(struct ath_hw *ah)
+{
+}
+static inline void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done)
+{
+}
+static inline void ar9003_mci_check_bt(struct ath_hw *ah)
+{
+}
+static inline bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ return false;
+}
+static inline int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
+ struct ath9k_hw_cal_data *caldata)
+{
+ return 0;
+}
+static inline void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+ bool is_full_sleep)
+{
+}
+static inline void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+{
+}
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+
#endif
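The block of prototypes added to ar9003_mci.h above is wrapped in CONFIG_ATH9K_BTCOEX_SUPPORT, with empty static inline stubs in the #else branch so callers need no #ifdefs of their own. A minimal stand-alone illustration of that compile-out pattern follows; the feature switch and function names are invented for the example.

/* Stand-alone sketch of the "real function or empty static inline"
 * pattern used in ar9003_mci.h above. */
#include <stdio.h>

#define EXAMPLE_FEATURE 1		/* flip to 0 to compile the stub */

#if EXAMPLE_FEATURE
static void example_check_bt(void)
{
	printf("feature enabled: doing real work\n");
}
#else
static inline void example_check_bt(void)
{
	/* compiled out: callers still build, the call folds to nothing */
}
#endif

int main(void)
{
	example_check_bt();		/* no #ifdef needed at the call site */
	return 0;
}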
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 2b0bfb8cca02..bc992b237ae5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -679,18 +679,17 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
* different modal values.
*/
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
- REG_WRITE_ARRAY(&ah->iniModesAdditional,
+ REG_WRITE_ARRAY(&ah->iniModesFastClock,
modesIndex, regWrites);
- if (AR_SREV_9330(ah))
- REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites);
-
- if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
- REG_WRITE_ARRAY(&ah->iniModesAdditional_40M, 1, regWrites);
+ REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
if (AR_SREV_9462(ah))
ar9003_hw_prog_ini(ah, &ah->ini_BTCOEX_MAX_TXPWR, 1);
+ if (chan->channel == 2484)
+ ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1);
+
ah->modes_index = modesIndex;
ar9003_hw_override_ini(ah);
ar9003_hw_set_channel_regs(ah, chan);
@@ -1099,13 +1098,20 @@ static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
{
ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ;
ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ;
- if (AR_SREV_9330(ah))
- ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9330_2GHZ;
- else
- ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9300_2GHZ;
+ ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9300_2GHZ;
ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ;
ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ;
ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9300_5GHZ;
+
+ if (AR_SREV_9330(ah))
+ ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9330_2GHZ;
+
+ if (AR_SREV_9462(ah)) {
+ ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ;
+ ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9462_2GHZ;
+ ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ;
+ ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9462_5GHZ;
+ }
}
/*
@@ -1313,13 +1319,9 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
* different modal values.
*/
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
- REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex, regWrites);
-
- if (AR_SREV_9330(ah))
- REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites);
+ REG_WRITE_ARRAY(&ah->iniModesFastClock, modesIndex, regWrites);
- if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
- REG_WRITE_ARRAY(&ah->iniModesAdditional_40M, 1, regWrites);
+ REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
ah->modes_index = modesIndex;
*ini_reloaded = true;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index ed64114571fc..d834d97fe727 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -325,13 +325,18 @@
#define AR_PHY_RX_OCGAIN (AR_AGC_BASE + 0x200)
-#define AR_PHY_CCA_NOM_VAL_9300_2GHZ (AR_SREV_9462(ah) ? -127 : -110)
-#define AR_PHY_CCA_NOM_VAL_9300_5GHZ (AR_SREV_9462(ah) ? -127 : -115)
-#define AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ (AR_SREV_9462(ah) ? -127 : -125)
-#define AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ (AR_SREV_9462(ah) ? -127 : -125)
+#define AR_PHY_CCA_NOM_VAL_9300_2GHZ -110
+#define AR_PHY_CCA_NOM_VAL_9300_5GHZ -115
+#define AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ -125
+#define AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ -125
#define AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ -95
#define AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ -100
+#define AR_PHY_CCA_NOM_VAL_9462_2GHZ -127
+#define AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ -127
+#define AR_PHY_CCA_NOM_VAL_9462_5GHZ -127
+#define AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ -127
+
#define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
/*
@@ -612,16 +617,14 @@
#define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4)
#define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8)
#define AR_PHY_AIC_CTRL_3_B0 (AR_SM_BASE + 0x4bc)
-#define AR_PHY_AIC_STAT_0_B0 (AR_SM_BASE + (AR_SREV_9462_10(ah) ? \
- 0x4c0 : 0x4c4))
-#define AR_PHY_AIC_STAT_1_B0 (AR_SM_BASE + (AR_SREV_9462_10(ah) ? \
- 0x4c4 : 0x4c8))
+#define AR_PHY_AIC_STAT_0_B0 (AR_SM_BASE + 0x4c4)
+#define AR_PHY_AIC_STAT_1_B0 (AR_SM_BASE + 0x4c8)
#define AR_PHY_AIC_CTRL_4_B0 (AR_SM_BASE + 0x4c0)
#define AR_PHY_AIC_STAT_2_B0 (AR_SM_BASE + 0x4cc)
#define AR_PHY_65NM_CH0_SYNTH4 0x1608c
-#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT 0x00000002
-#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S 1
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT (AR_SREV_9462(ah) ? 0x00000001 : 0x00000002)
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S (AR_SREV_9462(ah) ? 0 : 1)
#define AR_PHY_65NM_CH0_SYNTH7 0x16098
#define AR_PHY_65NM_CH0_BIAS1 0x160c0
#define AR_PHY_65NM_CH0_BIAS2 0x160c4
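Note: AR_PHY_SYNTH4_LONG_SHIFT_SELECT now evaluates to a different bit depending on the chip (mask 0x1 with shift 0 on AR9462, mask 0x2 with shift 1 otherwise), so the macro can only be expanded where an ah pointer is in scope. The sketch below shows how such a revision-dependent mask/shift pair is typically applied; the fake register variable and helper names are hypothetical stand-ins used only for illustration, not the driver's REG_READ/REG_WRITE path.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fake backing store standing in for the MMIO register at 0x1608c. */
static uint32_t fake_synth4;

/* Mask and shift depend on the chip revision, mirroring the new
 * AR_PHY_SYNTH4_LONG_SHIFT_SELECT definitions above. */
#define LONG_SHIFT_SELECT(is_9462)	((is_9462) ? 0x00000001u : 0x00000002u)
#define LONG_SHIFT_SELECT_S(is_9462)	((is_9462) ? 0 : 1)

static void set_long_shift_select(bool is_9462, uint32_t enable)
{
	uint32_t val = fake_synth4;

	val &= ~LONG_SHIFT_SELECT(is_9462);
	val |= (enable << LONG_SHIFT_SELECT_S(is_9462)) &
	       LONG_SHIFT_SELECT(is_9462);
	fake_synth4 = val;
}

int main(void)
{
	set_long_shift_select(true, 1);		/* AR9462: sets bit 0 */
	printf("AR9462:  0x%08x\n", fake_synth4);

	fake_synth4 = 0;
	set_long_shift_select(false, 1);	/* others: sets bit 1 */
	printf("default: 0x%08x\n", fake_synth4);
	return 0;
}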
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_1p0_initvals.h
deleted file mode 100644
index 5c55ae389adb..000000000000
--- a/drivers/net/wireless/ath/ath9k/ar9462_1p0_initvals.h
+++ /dev/null
@@ -1,1833 +0,0 @@
-/*
- * Copyright (c) 2010 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef INITVALS_9462_1P0_H
-#define INITVALS_9462_1P0_H
-
-/* AR9462 1.0 */
-
-static const u32 ar9462_1p0_mac_core[][2] = {
- /* Addr allmodes */
- {0x00000008, 0x00000000},
- {0x00000030, 0x00060085},
- {0x00000034, 0x00000005},
- {0x00000040, 0x00000000},
- {0x00000044, 0x00000000},
- {0x00000048, 0x00000008},
- {0x0000004c, 0x00000010},
- {0x00000050, 0x00000000},
- {0x00001040, 0x002ffc0f},
- {0x00001044, 0x002ffc0f},
- {0x00001048, 0x002ffc0f},
- {0x0000104c, 0x002ffc0f},
- {0x00001050, 0x002ffc0f},
- {0x00001054, 0x002ffc0f},
- {0x00001058, 0x002ffc0f},
- {0x0000105c, 0x002ffc0f},
- {0x00001060, 0x002ffc0f},
- {0x00001064, 0x002ffc0f},
- {0x000010f0, 0x00000100},
- {0x00001270, 0x00000000},
- {0x000012b0, 0x00000000},
- {0x000012f0, 0x00000000},
- {0x0000143c, 0x00000000},
- {0x0000147c, 0x00000000},
- {0x00001810, 0x0f000003},
- {0x00008000, 0x00000000},
- {0x00008004, 0x00000000},
- {0x00008008, 0x00000000},
- {0x0000800c, 0x00000000},
- {0x00008018, 0x00000000},
- {0x00008020, 0x00000000},
- {0x00008038, 0x00000000},
- {0x0000803c, 0x00080000},
- {0x00008040, 0x00000000},
- {0x00008044, 0x00000000},
- {0x00008048, 0x00000000},
- {0x0000804c, 0xffffffff},
- {0x00008050, 0xffffffff},
- {0x00008054, 0x00000000},
- {0x00008058, 0x00000000},
- {0x0000805c, 0x000fc78f},
- {0x00008060, 0x0000000f},
- {0x00008064, 0x00000000},
- {0x00008070, 0x00000310},
- {0x00008074, 0x00000020},
- {0x00008078, 0x00000000},
- {0x0000809c, 0x0000000f},
- {0x000080a0, 0x00000000},
- {0x000080a4, 0x02ff0000},
- {0x000080a8, 0x0e070605},
- {0x000080ac, 0x0000000d},
- {0x000080b0, 0x00000000},
- {0x000080b4, 0x00000000},
- {0x000080b8, 0x00000000},
- {0x000080bc, 0x00000000},
- {0x000080c0, 0x2a800000},
- {0x000080c4, 0x06900168},
- {0x000080c8, 0x13881c20},
- {0x000080cc, 0x01f40000},
- {0x000080d0, 0x00252500},
- {0x000080d4, 0x00a00005},
- {0x000080d8, 0x00400002},
- {0x000080dc, 0x00000000},
- {0x000080e0, 0xffffffff},
- {0x000080e4, 0x0000ffff},
- {0x000080e8, 0x3f3f3f3f},
- {0x000080ec, 0x00000000},
- {0x000080f0, 0x00000000},
- {0x000080f4, 0x00000000},
- {0x000080fc, 0x00020000},
- {0x00008100, 0x00000000},
- {0x00008108, 0x00000052},
- {0x0000810c, 0x00000000},
- {0x00008110, 0x00000000},
- {0x00008114, 0x000007ff},
- {0x00008118, 0x000000aa},
- {0x0000811c, 0x00003210},
- {0x00008124, 0x00000000},
- {0x00008128, 0x00000000},
- {0x0000812c, 0x00000000},
- {0x00008130, 0x00000000},
- {0x00008134, 0x00000000},
- {0x00008138, 0x00000000},
- {0x0000813c, 0x0000ffff},
- {0x00008144, 0xffffffff},
- {0x00008168, 0x00000000},
- {0x0000816c, 0x00000000},
- {0x00008170, 0x18486e00},
- {0x00008174, 0x33332210},
- {0x00008178, 0x00000000},
- {0x0000817c, 0x00020000},
- {0x000081c4, 0x33332210},
- {0x000081c8, 0x00000000},
- {0x000081cc, 0x00000000},
- {0x000081d4, 0x00000000},
- {0x000081ec, 0x00000000},
- {0x000081f0, 0x00000000},
- {0x000081f4, 0x00000000},
- {0x000081f8, 0x00000000},
- {0x000081fc, 0x00000000},
- {0x00008240, 0x00100000},
- {0x00008244, 0x0010f400},
- {0x00008248, 0x00000800},
- {0x0000824c, 0x0001e800},
- {0x00008250, 0x00000000},
- {0x00008254, 0x00000000},
- {0x00008258, 0x00000000},
- {0x0000825c, 0x40000000},
- {0x00008260, 0x00080922},
- {0x00008264, 0x99c00010},
- {0x00008268, 0xffffffff},
- {0x0000826c, 0x0000ffff},
- {0x00008270, 0x00000000},
- {0x00008274, 0x40000000},
- {0x00008278, 0x003e4180},
- {0x0000827c, 0x00000004},
- {0x00008284, 0x0000002c},
- {0x00008288, 0x0000002c},
- {0x0000828c, 0x000000ff},
- {0x00008294, 0x00000000},
- {0x00008298, 0x00000000},
- {0x0000829c, 0x00000000},
- {0x00008300, 0x00000140},
- {0x00008314, 0x00000000},
- {0x0000831c, 0x0000010d},
- {0x00008328, 0x00000000},
- {0x0000832c, 0x0000001f},
- {0x00008330, 0x00000302},
- {0x00008334, 0x00000700},
- {0x00008338, 0xffff0000},
- {0x0000833c, 0x02400000},
- {0x00008340, 0x000107ff},
- {0x00008344, 0xaa48105b},
- {0x00008348, 0x008f0000},
- {0x0000835c, 0x00000000},
- {0x00008360, 0xffffffff},
- {0x00008364, 0xffffffff},
- {0x00008368, 0x00000000},
- {0x00008370, 0x00000000},
- {0x00008374, 0x000000ff},
- {0x00008378, 0x00000000},
- {0x0000837c, 0x00000000},
- {0x00008380, 0xffffffff},
- {0x00008384, 0xffffffff},
- {0x00008390, 0xffffffff},
- {0x00008394, 0xffffffff},
- {0x00008398, 0x00000000},
- {0x0000839c, 0x00000000},
- {0x000083a4, 0x0000fa14},
- {0x000083a8, 0x000f0c00},
- {0x000083ac, 0x33332210},
- {0x000083b0, 0x33332210},
- {0x000083b4, 0x33332210},
- {0x000083b8, 0x33332210},
- {0x000083bc, 0x00000000},
- {0x000083c0, 0x00000000},
- {0x000083c4, 0x00000000},
- {0x000083c8, 0x00000000},
- {0x000083cc, 0x00000200},
- {0x000083d0, 0x000301ff},
-};
-
-static const u32 ar9462_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
-
-static const u32 ar9462_1p0_sys3ant[][2] = {
- /* Addr allmodes */
- {0x00063280, 0x00040807},
- {0x00063284, 0x104ccccc},
-};
-
-static const u32 ar9462_pcie_phy_clkreq_enable_L1_1p0[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x10053e5e},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0000580c},
-};
-
-static const u32 ar9462_1p0_mac_core_emulation[][2] = {
- /* Addr allmodes */
- {0x00000030, 0x00060085},
- {0x00000044, 0x00000008},
- {0x0000805c, 0xffffc7ff},
- {0x00008344, 0xaa4a105b},
-};
-
-static const u32 ar9462_common_rx_gain_table_ar9280_2p0_1p0[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x02000101},
- {0x0000a004, 0x02000102},
- {0x0000a008, 0x02000103},
- {0x0000a00c, 0x02000104},
- {0x0000a010, 0x02000200},
- {0x0000a014, 0x02000201},
- {0x0000a018, 0x02000202},
- {0x0000a01c, 0x02000203},
- {0x0000a020, 0x02000204},
- {0x0000a024, 0x02000205},
- {0x0000a028, 0x02000208},
- {0x0000a02c, 0x02000302},
- {0x0000a030, 0x02000303},
- {0x0000a034, 0x02000304},
- {0x0000a038, 0x02000400},
- {0x0000a03c, 0x02010300},
- {0x0000a040, 0x02010301},
- {0x0000a044, 0x02010302},
- {0x0000a048, 0x02000500},
- {0x0000a04c, 0x02010400},
- {0x0000a050, 0x02020300},
- {0x0000a054, 0x02020301},
- {0x0000a058, 0x02020302},
- {0x0000a05c, 0x02020303},
- {0x0000a060, 0x02020400},
- {0x0000a064, 0x02030300},
- {0x0000a068, 0x02030301},
- {0x0000a06c, 0x02030302},
- {0x0000a070, 0x02030303},
- {0x0000a074, 0x02030400},
- {0x0000a078, 0x02040300},
- {0x0000a07c, 0x02040301},
- {0x0000a080, 0x02040302},
- {0x0000a084, 0x02040303},
- {0x0000a088, 0x02030500},
- {0x0000a08c, 0x02040400},
- {0x0000a090, 0x02050203},
- {0x0000a094, 0x02050204},
- {0x0000a098, 0x02050205},
- {0x0000a09c, 0x02040500},
- {0x0000a0a0, 0x02050301},
- {0x0000a0a4, 0x02050302},
- {0x0000a0a8, 0x02050303},
- {0x0000a0ac, 0x02050400},
- {0x0000a0b0, 0x02050401},
- {0x0000a0b4, 0x02050402},
- {0x0000a0b8, 0x02050403},
- {0x0000a0bc, 0x02050500},
- {0x0000a0c0, 0x02050501},
- {0x0000a0c4, 0x02050502},
- {0x0000a0c8, 0x02050503},
- {0x0000a0cc, 0x02050504},
- {0x0000a0d0, 0x02050600},
- {0x0000a0d4, 0x02050601},
- {0x0000a0d8, 0x02050602},
- {0x0000a0dc, 0x02050603},
- {0x0000a0e0, 0x02050604},
- {0x0000a0e4, 0x02050700},
- {0x0000a0e8, 0x02050701},
- {0x0000a0ec, 0x02050702},
- {0x0000a0f0, 0x02050703},
- {0x0000a0f4, 0x02050704},
- {0x0000a0f8, 0x02050705},
- {0x0000a0fc, 0x02050708},
- {0x0000a100, 0x02050709},
- {0x0000a104, 0x0205070a},
- {0x0000a108, 0x0205070b},
- {0x0000a10c, 0x0205070c},
- {0x0000a110, 0x0205070d},
- {0x0000a114, 0x02050710},
- {0x0000a118, 0x02050711},
- {0x0000a11c, 0x02050712},
- {0x0000a120, 0x02050713},
- {0x0000a124, 0x02050714},
- {0x0000a128, 0x02050715},
- {0x0000a12c, 0x02050730},
- {0x0000a130, 0x02050731},
- {0x0000a134, 0x02050732},
- {0x0000a138, 0x02050733},
- {0x0000a13c, 0x02050734},
- {0x0000a140, 0x02050735},
- {0x0000a144, 0x02050750},
- {0x0000a148, 0x02050751},
- {0x0000a14c, 0x02050752},
- {0x0000a150, 0x02050753},
- {0x0000a154, 0x02050754},
- {0x0000a158, 0x02050755},
- {0x0000a15c, 0x02050770},
- {0x0000a160, 0x02050771},
- {0x0000a164, 0x02050772},
- {0x0000a168, 0x02050773},
- {0x0000a16c, 0x02050774},
- {0x0000a170, 0x02050775},
- {0x0000a174, 0x00000776},
- {0x0000a178, 0x00000776},
- {0x0000a17c, 0x00000776},
- {0x0000a180, 0x00000776},
- {0x0000a184, 0x00000776},
- {0x0000a188, 0x00000776},
- {0x0000a18c, 0x00000776},
- {0x0000a190, 0x00000776},
- {0x0000a194, 0x00000776},
- {0x0000a198, 0x00000776},
- {0x0000a19c, 0x00000776},
- {0x0000a1a0, 0x00000776},
- {0x0000a1a4, 0x00000776},
- {0x0000a1a8, 0x00000776},
- {0x0000a1ac, 0x00000776},
- {0x0000a1b0, 0x00000776},
- {0x0000a1b4, 0x00000776},
- {0x0000a1b8, 0x00000776},
- {0x0000a1bc, 0x00000776},
- {0x0000a1c0, 0x00000776},
- {0x0000a1c4, 0x00000776},
- {0x0000a1c8, 0x00000776},
- {0x0000a1cc, 0x00000776},
- {0x0000a1d0, 0x00000776},
- {0x0000a1d4, 0x00000776},
- {0x0000a1d8, 0x00000776},
- {0x0000a1dc, 0x00000776},
- {0x0000a1e0, 0x00000776},
- {0x0000a1e4, 0x00000776},
- {0x0000a1e8, 0x00000776},
- {0x0000a1ec, 0x00000776},
- {0x0000a1f0, 0x00000776},
- {0x0000a1f4, 0x00000776},
- {0x0000a1f8, 0x00000776},
- {0x0000a1fc, 0x00000776},
- {0x0000b000, 0x02000101},
- {0x0000b004, 0x02000102},
- {0x0000b008, 0x02000103},
- {0x0000b00c, 0x02000104},
- {0x0000b010, 0x02000200},
- {0x0000b014, 0x02000201},
- {0x0000b018, 0x02000202},
- {0x0000b01c, 0x02000203},
- {0x0000b020, 0x02000204},
- {0x0000b024, 0x02000205},
- {0x0000b028, 0x02000208},
- {0x0000b02c, 0x02000302},
- {0x0000b030, 0x02000303},
- {0x0000b034, 0x02000304},
- {0x0000b038, 0x02000400},
- {0x0000b03c, 0x02010300},
- {0x0000b040, 0x02010301},
- {0x0000b044, 0x02010302},
- {0x0000b048, 0x02000500},
- {0x0000b04c, 0x02010400},
- {0x0000b050, 0x02020300},
- {0x0000b054, 0x02020301},
- {0x0000b058, 0x02020302},
- {0x0000b05c, 0x02020303},
- {0x0000b060, 0x02020400},
- {0x0000b064, 0x02030300},
- {0x0000b068, 0x02030301},
- {0x0000b06c, 0x02030302},
- {0x0000b070, 0x02030303},
- {0x0000b074, 0x02030400},
- {0x0000b078, 0x02040300},
- {0x0000b07c, 0x02040301},
- {0x0000b080, 0x02040302},
- {0x0000b084, 0x02040303},
- {0x0000b088, 0x02030500},
- {0x0000b08c, 0x02040400},
- {0x0000b090, 0x02050203},
- {0x0000b094, 0x02050204},
- {0x0000b098, 0x02050205},
- {0x0000b09c, 0x02040500},
- {0x0000b0a0, 0x02050301},
- {0x0000b0a4, 0x02050302},
- {0x0000b0a8, 0x02050303},
- {0x0000b0ac, 0x02050400},
- {0x0000b0b0, 0x02050401},
- {0x0000b0b4, 0x02050402},
- {0x0000b0b8, 0x02050403},
- {0x0000b0bc, 0x02050500},
- {0x0000b0c0, 0x02050501},
- {0x0000b0c4, 0x02050502},
- {0x0000b0c8, 0x02050503},
- {0x0000b0cc, 0x02050504},
- {0x0000b0d0, 0x02050600},
- {0x0000b0d4, 0x02050601},
- {0x0000b0d8, 0x02050602},
- {0x0000b0dc, 0x02050603},
- {0x0000b0e0, 0x02050604},
- {0x0000b0e4, 0x02050700},
- {0x0000b0e8, 0x02050701},
- {0x0000b0ec, 0x02050702},
- {0x0000b0f0, 0x02050703},
- {0x0000b0f4, 0x02050704},
- {0x0000b0f8, 0x02050705},
- {0x0000b0fc, 0x02050708},
- {0x0000b100, 0x02050709},
- {0x0000b104, 0x0205070a},
- {0x0000b108, 0x0205070b},
- {0x0000b10c, 0x0205070c},
- {0x0000b110, 0x0205070d},
- {0x0000b114, 0x02050710},
- {0x0000b118, 0x02050711},
- {0x0000b11c, 0x02050712},
- {0x0000b120, 0x02050713},
- {0x0000b124, 0x02050714},
- {0x0000b128, 0x02050715},
- {0x0000b12c, 0x02050730},
- {0x0000b130, 0x02050731},
- {0x0000b134, 0x02050732},
- {0x0000b138, 0x02050733},
- {0x0000b13c, 0x02050734},
- {0x0000b140, 0x02050735},
- {0x0000b144, 0x02050750},
- {0x0000b148, 0x02050751},
- {0x0000b14c, 0x02050752},
- {0x0000b150, 0x02050753},
- {0x0000b154, 0x02050754},
- {0x0000b158, 0x02050755},
- {0x0000b15c, 0x02050770},
- {0x0000b160, 0x02050771},
- {0x0000b164, 0x02050772},
- {0x0000b168, 0x02050773},
- {0x0000b16c, 0x02050774},
- {0x0000b170, 0x02050775},
- {0x0000b174, 0x00000776},
- {0x0000b178, 0x00000776},
- {0x0000b17c, 0x00000776},
- {0x0000b180, 0x00000776},
- {0x0000b184, 0x00000776},
- {0x0000b188, 0x00000776},
- {0x0000b18c, 0x00000776},
- {0x0000b190, 0x00000776},
- {0x0000b194, 0x00000776},
- {0x0000b198, 0x00000776},
- {0x0000b19c, 0x00000776},
- {0x0000b1a0, 0x00000776},
- {0x0000b1a4, 0x00000776},
- {0x0000b1a8, 0x00000776},
- {0x0000b1ac, 0x00000776},
- {0x0000b1b0, 0x00000776},
- {0x0000b1b4, 0x00000776},
- {0x0000b1b8, 0x00000776},
- {0x0000b1bc, 0x00000776},
- {0x0000b1c0, 0x00000776},
- {0x0000b1c4, 0x00000776},
- {0x0000b1c8, 0x00000776},
- {0x0000b1cc, 0x00000776},
- {0x0000b1d0, 0x00000776},
- {0x0000b1d4, 0x00000776},
- {0x0000b1d8, 0x00000776},
- {0x0000b1dc, 0x00000776},
- {0x0000b1e0, 0x00000776},
- {0x0000b1e4, 0x00000776},
- {0x0000b1e8, 0x00000776},
- {0x0000b1ec, 0x00000776},
- {0x0000b1f0, 0x00000776},
- {0x0000b1f4, 0x00000776},
- {0x0000b1f8, 0x00000776},
- {0x0000b1fc, 0x00000776},
-};
-
-static const u32 ar9200_ar9280_2p0_radio_core_1p0[][2] = {
- /* Addr allmodes */
- {0x00007800, 0x00040000},
- {0x00007804, 0xdb005012},
- {0x00007808, 0x04924914},
- {0x0000780c, 0x21084210},
- {0x00007810, 0x6d801300},
- {0x00007814, 0x0019beff},
- {0x00007818, 0x07e41000},
- {0x0000781c, 0x00392000},
- {0x00007820, 0x92592480},
- {0x00007824, 0x00040000},
- {0x00007828, 0xdb005012},
- {0x0000782c, 0x04924914},
- {0x00007830, 0x21084210},
- {0x00007834, 0x6d801300},
- {0x00007838, 0x0019beff},
- {0x0000783c, 0x07e40000},
- {0x00007840, 0x00392000},
- {0x00007844, 0x92592480},
- {0x00007848, 0x00100000},
- {0x0000784c, 0x773f0567},
- {0x00007850, 0x54214514},
- {0x00007854, 0x12035828},
- {0x00007858, 0x92592692},
- {0x0000785c, 0x00000000},
- {0x00007860, 0x56400000},
- {0x00007864, 0x0a8e370e},
- {0x00007868, 0xc0102850},
- {0x0000786c, 0x812d4000},
- {0x00007870, 0x807ec400},
- {0x00007874, 0x001b6db0},
- {0x00007878, 0x00376b63},
- {0x0000787c, 0x06db6db6},
- {0x00007880, 0x006d8000},
- {0x00007884, 0xffeffffe},
- {0x00007888, 0xffeffffe},
- {0x0000788c, 0x00010000},
- {0x00007890, 0x02060aeb},
- {0x00007894, 0x5a108000},
-};
-
-static const u32 ar9462_1p0_baseband_postamble_emulation[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00009e3c, 0xcf946221, 0xcf946221, 0xcf946221, 0xcf946221},
- {0x00009e44, 0x005c0000, 0x005c0000, 0x005c0000, 0x005c0000},
- {0x0000a258, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
- {0x0000a25c, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x0000a28c, 0x00011111, 0x00011111, 0x00011111, 0x00011111},
- {0x0000a2c4, 0x00148d18, 0x00148d18, 0x00148d20, 0x00148d20},
- {0x0000a2d8, 0xf999a800, 0xf999a800, 0xf999a80c, 0xf999a80c},
- {0x0000a50c, 0x0000c00a, 0x0000c00a, 0x0000c00a, 0x0000c00a},
- {0x0000a538, 0x00038e8c, 0x00038e8c, 0x00038e8c, 0x00038e8c},
- {0x0000a53c, 0x0003cecc, 0x0003cecc, 0x0003cecc, 0x0003cecc},
- {0x0000a540, 0x00040ed4, 0x00040ed4, 0x00040ed4, 0x00040ed4},
- {0x0000a544, 0x00044edc, 0x00044edc, 0x00044edc, 0x00044edc},
- {0x0000a548, 0x00048ede, 0x00048ede, 0x00048ede, 0x00048ede},
- {0x0000a54c, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e},
- {0x0000a550, 0x00050f5e, 0x00050f5e, 0x00050f5e, 0x00050f5e},
- {0x0000a554, 0x00054f9e, 0x00054f9e, 0x00054f9e, 0x00054f9e},
- {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-};
-
-static const u32 ar9462_pcie_phy_pll_on_clkreq_disable_L1_1p0[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x10012e5e},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0000580c},
-};
-
-static const u32 ar9462_common_rx_gain_table_1p0[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x01910190},
- {0x0000a030, 0x01930192},
- {0x0000a034, 0x01950194},
- {0x0000a038, 0x038a0196},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x22222229},
- {0x0000a084, 0x1d1d1d1d},
- {0x0000a088, 0x1d1d1d1d},
- {0x0000a08c, 0x1d1d1d1d},
- {0x0000a090, 0x171d1d1d},
- {0x0000a094, 0x11111717},
- {0x0000a098, 0x00030311},
- {0x0000a09c, 0x00000000},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x2a2d2f32},
- {0x0000b084, 0x21232328},
- {0x0000b088, 0x19191c1e},
- {0x0000b08c, 0x12141417},
- {0x0000b090, 0x07070e0e},
- {0x0000b094, 0x03030305},
- {0x0000b098, 0x00000003},
- {0x0000b09c, 0x00000000},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_modes_high_ob_db_tx_gain_table_1p0[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
- {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
- {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
- {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
- {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
- {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
- {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
- {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
- {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
- {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
- {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
- {0x00016444, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
- {0x00016448, 0x8db49000, 0x8db49000, 0x8db49000, 0x8db49000},
-};
-
-static const u32 ar9462_common_wo_xlna_rx_gain_table_1p0[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x32323232},
- {0x0000b084, 0x2f2f3232},
- {0x0000b088, 0x23282a2d},
- {0x0000b08c, 0x1c1e2123},
- {0x0000b090, 0x14171919},
- {0x0000b094, 0x0e0e1214},
- {0x0000b098, 0x03050707},
- {0x0000b09c, 0x00030303},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_1p0_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
-
-static const u32 ar9462_1p0_mac_postamble_emulation[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00008014, 0x10f810f8, 0x10f810f8, 0x10f810f8, 0x10f810f8},
- {0x0000801c, 0x0e8d8017, 0x0e8d8017, 0x0e8d8017, 0x0e8d8017},
-};
-
-static const u32 ar9462_1p0_tx_gain_table_baseband_postamble_emulation[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a410, 0x000000d5, 0x000000d5, 0x000000d5, 0x000000d5},
- {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a504, 0x00004002, 0x00004002, 0x00004002, 0x00004002},
- {0x0000a508, 0x00008004, 0x00008004, 0x00008004, 0x00008004},
- {0x0000a510, 0x0001000c, 0x0001000c, 0x0001000c, 0x0001000c},
- {0x0000a514, 0x0001420b, 0x0001420b, 0x0001420b, 0x0001420b},
- {0x0000a518, 0x0001824a, 0x0001824a, 0x0001824a, 0x0001824a},
- {0x0000a51c, 0x0001c44a, 0x0001c44a, 0x0001c44a, 0x0001c44a},
- {0x0000a520, 0x0002064a, 0x0002064a, 0x0002064a, 0x0002064a},
- {0x0000a524, 0x0002484a, 0x0002484a, 0x0002484a, 0x0002484a},
- {0x0000a528, 0x00028a4a, 0x00028a4a, 0x00028a4a, 0x00028a4a},
- {0x0000a52c, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a},
- {0x0000a530, 0x00030e4a, 0x00030e4a, 0x00030e4a, 0x00030e4a},
- {0x0000a534, 0x00034e8a, 0x00034e8a, 0x00034e8a, 0x00034e8a},
-};
-
-static const u32 ar9462_1p0_radio_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
- {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24646c08, 0x24646c08},
- {0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
- {0x0001610c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
- {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
- {0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
- {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
-};
-
-static const u32 ar9462_1p0_soc_postamble_emulation[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00007010, 0x00001133, 0x00001133, 0x00001133, 0x00001133},
-};
-
-static const u32 ar9462_1p0_baseband_core[][2] = {
- /* Addr allmodes */
- {0x00009800, 0xafe68e30},
- {0x00009804, 0xfd14e000},
- {0x00009808, 0x9c0a9f6b},
- {0x0000980c, 0x04900000},
- {0x00009814, 0x9280c00a},
- {0x00009818, 0x00000000},
- {0x0000981c, 0x00020028},
- {0x00009834, 0x6400a290},
- {0x00009838, 0x0108ecff},
- {0x0000983c, 0x0d000600},
- {0x00009880, 0x201fff00},
- {0x00009884, 0x00001042},
- {0x000098a4, 0x00200400},
- {0x000098b0, 0x32840bbe},
- {0x000098d0, 0x004b6a8e},
- {0x000098d4, 0x00000820},
- {0x000098dc, 0x00000000},
- {0x000098e4, 0x01ffffff},
- {0x000098e8, 0x01ffffff},
- {0x000098ec, 0x01ffffff},
- {0x000098f0, 0x00000000},
- {0x000098f4, 0x00000000},
- {0x00009c04, 0xff55ff55},
- {0x00009c08, 0x0320ff55},
- {0x00009c0c, 0x00000000},
- {0x00009c10, 0x00000000},
- {0x00009c14, 0x00046384},
- {0x00009c18, 0x05b6b440},
- {0x00009c1c, 0x00b6b440},
- {0x00009d00, 0xc080a333},
- {0x00009d04, 0x40206c10},
- {0x00009d08, 0x009c4060},
- {0x00009d0c, 0x9883800a},
- {0x00009d10, 0x01834061},
- {0x00009d14, 0x00c0040b},
- {0x00009d18, 0x00000000},
- {0x00009e08, 0x0038230c},
- {0x00009e24, 0x990bb514},
- {0x00009e28, 0x0c6f0000},
- {0x00009e30, 0x06336f77},
- {0x00009e34, 0x6af6532f},
- {0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
- {0x00009e4c, 0x00001004},
- {0x00009e50, 0x00ff03f1},
- {0x00009e54, 0x64c355c7},
- {0x00009e58, 0xfd897735},
- {0x00009e5c, 0xe9198724},
- {0x00009fc0, 0x803e4788},
- {0x00009fc4, 0x0001efb5},
- {0x00009fcc, 0x40000014},
- {0x00009fd0, 0x01193b93},
- {0x0000a20c, 0x00000000},
- {0x0000a220, 0x00000000},
- {0x0000a224, 0x00000000},
- {0x0000a228, 0x10002310},
- {0x0000a23c, 0x00000000},
- {0x0000a244, 0x0c000000},
- {0x0000a2a0, 0x00000001},
- {0x0000a2c0, 0x00000001},
- {0x0000a2c8, 0x00000000},
- {0x0000a2cc, 0x18c43433},
- {0x0000a2d4, 0x00000000},
- {0x0000a2ec, 0x00000000},
- {0x0000a2f0, 0x00000000},
- {0x0000a2f4, 0x00000000},
- {0x0000a2f8, 0x00000000},
- {0x0000a344, 0x00000000},
- {0x0000a34c, 0x00000000},
- {0x0000a350, 0x0000a000},
- {0x0000a364, 0x00000000},
- {0x0000a370, 0x00000000},
- {0x0000a390, 0x00000001},
- {0x0000a394, 0x00000444},
- {0x0000a398, 0x001f0e0f},
- {0x0000a39c, 0x0075393f},
- {0x0000a3a0, 0xb79f6427},
- {0x0000a3a4, 0x00000000},
- {0x0000a3a8, 0xaaaaaaaa},
- {0x0000a3ac, 0x3c466478},
- {0x0000a3c0, 0x20202020},
- {0x0000a3c4, 0x22222220},
- {0x0000a3c8, 0x20200020},
- {0x0000a3cc, 0x20202020},
- {0x0000a3d0, 0x20202020},
- {0x0000a3d4, 0x20202020},
- {0x0000a3d8, 0x20202020},
- {0x0000a3dc, 0x20202020},
- {0x0000a3e0, 0x20202020},
- {0x0000a3e4, 0x20202020},
- {0x0000a3e8, 0x20202020},
- {0x0000a3ec, 0x20202020},
- {0x0000a3f0, 0x00000000},
- {0x0000a3f4, 0x00000006},
- {0x0000a3f8, 0x0c9bd380},
- {0x0000a3fc, 0x000f0f01},
- {0x0000a400, 0x8fa91f01},
- {0x0000a404, 0x00000000},
- {0x0000a408, 0x0e79e5c6},
- {0x0000a40c, 0x00820820},
- {0x0000a414, 0x1ce739ce},
- {0x0000a418, 0x2d001dce},
- {0x0000a41c, 0x1ce739ce},
- {0x0000a420, 0x000001ce},
- {0x0000a424, 0x1ce739ce},
- {0x0000a428, 0x000001ce},
- {0x0000a42c, 0x1ce739ce},
- {0x0000a430, 0x1ce739ce},
- {0x0000a434, 0x00000000},
- {0x0000a438, 0x00001801},
- {0x0000a43c, 0x00100000},
- {0x0000a440, 0x00000000},
- {0x0000a444, 0x00000000},
- {0x0000a448, 0x05000080},
- {0x0000a44c, 0x00000001},
- {0x0000a450, 0x00010000},
- {0x0000a458, 0x00000000},
- {0x0000a644, 0xbfad9d74},
- {0x0000a648, 0x0048060a},
- {0x0000a64c, 0x00003c37},
- {0x0000a670, 0x03020100},
- {0x0000a674, 0x09080504},
- {0x0000a678, 0x0d0c0b0a},
- {0x0000a67c, 0x13121110},
- {0x0000a680, 0x31301514},
- {0x0000a684, 0x35343332},
- {0x0000a688, 0x00000036},
- {0x0000a690, 0x00000838},
- {0x0000a6b0, 0x0000000a},
- {0x0000a6b4, 0x28f12c01},
- {0x0000a7c0, 0x00000000},
- {0x0000a7c4, 0xfffffffc},
- {0x0000a7c8, 0x00000000},
- {0x0000a7cc, 0x00000000},
- {0x0000a7d0, 0x00000000},
- {0x0000a7d4, 0x00000004},
- {0x0000a7dc, 0x00000001},
- {0x0000a8d0, 0x004b6a8e},
- {0x0000a8d4, 0x00000820},
- {0x0000a8dc, 0x00000000},
- {0x0000a8f0, 0x00000000},
- {0x0000a8f4, 0x00000000},
- {0x0000b2d0, 0x00000080},
- {0x0000b2d4, 0x00000000},
- {0x0000b2ec, 0x00000000},
- {0x0000b2f0, 0x00000000},
- {0x0000b2f4, 0x00000000},
- {0x0000b2f8, 0x00000000},
- {0x0000b408, 0x0e79e5c0},
- {0x0000b40c, 0x00820820},
- {0x0000b420, 0x00000000},
- {0x0000b6b0, 0x0000000a},
- {0x0000b6b4, 0x00c00001},
-};
-
-static const u32 ar9462_1p0_baseband_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
- {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
- {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
- {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
- {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
- {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
- {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
- {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
- {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
- {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
- {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
- {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
- {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
- {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c782},
- {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
- {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
- {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
- {0x0000a204, 0x0131b7c0, 0x0131b7c4, 0x0131b7c4, 0x0131b7c0},
- {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
- {0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
- {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
- {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
- {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
- {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
- {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
- {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
- {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
- {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
- {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
- {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
- {0x0000a288, 0x00000110, 0x00000110, 0x00100110, 0x00100110},
- {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
- {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
- {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
- {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
- {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
- {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
- {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
- {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
-};
-
-static const u32 ar9462_modes_fast_clock_1p0[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
- {0x00001030, 0x00000268, 0x000004d0},
- {0x00001070, 0x0000018c, 0x00000318},
- {0x000010b0, 0x00000fd0, 0x00001fa0},
- {0x00008014, 0x044c044c, 0x08980898},
- {0x0000801c, 0x148ec02b, 0x148ec057},
- {0x00008318, 0x000044c0, 0x00008980},
- {0x00009e00, 0x0372131c, 0x0372131c},
- {0x0000a230, 0x0000400b, 0x00004016},
- {0x0000a254, 0x00000898, 0x00001130},
-};
-
-static const u32 ar9462_modes_low_ob_db_tx_gain_table_1p0[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
- {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
- {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
- {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
- {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
- {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
- {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
- {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
- {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
- {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
- {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
- {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
- {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
- {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
- {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
- {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
- {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
- {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
- {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
- {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
- {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
- {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
- {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
- {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
- {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
- {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
- {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
- {0x00016048, 0x64992060, 0x64992060, 0x64992060, 0x64992060},
- {0x00016444, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
- {0x00016448, 0x64992000, 0x64992000, 0x64992000, 0x64992000},
-};
-
-static const u32 ar9462_1p0_soc_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00007010, 0x00002233, 0x00002233, 0x00002233, 0x00002233},
-};
-
-static const u32 ar9462_common_mixed_rx_gain_table_1p0[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x2a2d2f32},
- {0x0000b084, 0x21232328},
- {0x0000b088, 0x19191c1e},
- {0x0000b08c, 0x12141417},
- {0x0000b090, 0x07070e0e},
- {0x0000b094, 0x03030305},
- {0x0000b098, 0x00000003},
- {0x0000b09c, 0x00000000},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_pcie_phy_clkreq_disable_L1_1p0[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x10013e5e},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0000580c},
-};
-
-static const u32 ar9462_1p0_baseband_core_emulation[][2] = {
- /* Addr allmodes */
- {0x00009800, 0xafa68e30},
- {0x00009884, 0x00002842},
- {0x00009c04, 0xff55ff55},
- {0x00009c08, 0x0320ff55},
- {0x00009e50, 0x00000000},
- {0x00009fcc, 0x00000014},
- {0x0000a344, 0x00000010},
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x71733d01},
- {0x0000a3a0, 0xd0ad5c12},
- {0x0000a3c0, 0x22222220},
- {0x0000a3c4, 0x22222222},
- {0x0000a404, 0x00418a11},
- {0x0000a418, 0x050001ce},
- {0x0000a438, 0x00001800},
- {0x0000a458, 0x01444452},
- {0x0000a644, 0x3fad9d74},
- {0x0000a690, 0x00000038},
-};
-
-static const u32 ar9462_1p0_radio_core[][2] = {
- /* Addr allmodes */
- {0x00016000, 0x36db6db6},
- {0x00016004, 0x6db6db40},
- {0x00016008, 0x73f00000},
- {0x0001600c, 0x00000000},
- {0x00016010, 0x6d820001},
- {0x00016040, 0x7f80fff8},
- {0x0001604c, 0x2699e04f},
- {0x00016050, 0x6db6db6c},
- {0x00016054, 0x6db60000},
- {0x00016058, 0x6c200000},
- {0x00016080, 0x00040000},
- {0x00016084, 0x9a68048c},
- {0x00016088, 0x54214514},
- {0x0001608c, 0x12030409},
- {0x00016090, 0x24926490},
- {0x00016098, 0xd2888888},
- {0x000160a0, 0x0a108ffe},
- {0x000160a4, 0x812fc490},
- {0x000160a8, 0x423c8000},
- {0x000160b4, 0x92000000},
- {0x000160b8, 0x0285dddc},
- {0x000160bc, 0x02908888},
- {0x000160c0, 0x00adb6d0},
- {0x000160c4, 0x6db6db60},
- {0x000160c8, 0x6db6db6c},
- {0x000160cc, 0x0de6c1b0},
- {0x00016100, 0x3fffbe04},
- {0x00016104, 0xfff80000},
- {0x00016108, 0x00200400},
- {0x00016110, 0x00000000},
- {0x00016144, 0x02084080},
- {0x00016148, 0x000080c0},
- {0x00016280, 0x050a0001},
- {0x00016284, 0x3d841400},
- {0x00016288, 0x00000000},
- {0x0001628c, 0xe3000000},
- {0x00016290, 0xa1005080},
- {0x00016294, 0x00000020},
- {0x00016298, 0x50a02900},
- {0x00016340, 0x121e4276},
- {0x00016344, 0x00300000},
- {0x00016400, 0x36db6db6},
- {0x00016404, 0x6db6db40},
- {0x00016408, 0x73f00000},
- {0x0001640c, 0x00000000},
- {0x00016410, 0x6c800001},
- {0x00016440, 0x7f80fff8},
- {0x0001644c, 0x4699e04f},
- {0x00016450, 0x6db6db6c},
- {0x00016454, 0x6db60000},
- {0x00016500, 0x3fffbe04},
- {0x00016504, 0xfff80000},
- {0x00016508, 0x00200400},
- {0x00016510, 0x00000000},
- {0x00016544, 0x02084080},
- {0x00016548, 0x000080c0},
-};
-
-static const u32 ar9462_1p0_soc_preamble[][2] = {
- /* Addr allmodes */
- {0x00007020, 0x00000000},
- {0x00007034, 0x00000002},
- {0x00007038, 0x000004c2},
-};
-
-static const u32 ar9462_1p0_sys2ant[][2] = {
- /* Addr allmodes */
- {0x00063120, 0x00801980},
-};
-
-#endif /* INITVALS_9462_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index dc2054f0378e..b6ba1e8149be 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -98,14 +98,6 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
};
-static const u32 ar9462_2p0_mac_core_emulation[][2] = {
- /* Addr allmodes */
- {0x00000030, 0x000e0085},
- {0x00000044, 0x00000008},
- {0x0000805c, 0xffffc7ff},
- {0x00008344, 0xaa4a105b},
-};
-
static const u32 ar9462_common_rx_gain_table_2p0[][2] = {
/* Addr allmodes */
{0x0000a000, 0x00010000},
@@ -380,349 +372,6 @@ static const u32 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0[][2] = {
{0x00018c08, 0x0003580c},
};
-static const u32 ar9462_2p0_sys3ant[][2] = {
- /* Addr allmodes */
- {0x00063280, 0x00040807},
- {0x00063284, 0x104ccccc},
-};
-
-static const u32 ar9462_common_rx_gain_table_ar9280_2p0[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x02000101},
- {0x0000a004, 0x02000102},
- {0x0000a008, 0x02000103},
- {0x0000a00c, 0x02000104},
- {0x0000a010, 0x02000200},
- {0x0000a014, 0x02000201},
- {0x0000a018, 0x02000202},
- {0x0000a01c, 0x02000203},
- {0x0000a020, 0x02000204},
- {0x0000a024, 0x02000205},
- {0x0000a028, 0x02000208},
- {0x0000a02c, 0x02000302},
- {0x0000a030, 0x02000303},
- {0x0000a034, 0x02000304},
- {0x0000a038, 0x02000400},
- {0x0000a03c, 0x02010300},
- {0x0000a040, 0x02010301},
- {0x0000a044, 0x02010302},
- {0x0000a048, 0x02000500},
- {0x0000a04c, 0x02010400},
- {0x0000a050, 0x02020300},
- {0x0000a054, 0x02020301},
- {0x0000a058, 0x02020302},
- {0x0000a05c, 0x02020303},
- {0x0000a060, 0x02020400},
- {0x0000a064, 0x02030300},
- {0x0000a068, 0x02030301},
- {0x0000a06c, 0x02030302},
- {0x0000a070, 0x02030303},
- {0x0000a074, 0x02030400},
- {0x0000a078, 0x02040300},
- {0x0000a07c, 0x02040301},
- {0x0000a080, 0x02040302},
- {0x0000a084, 0x02040303},
- {0x0000a088, 0x02030500},
- {0x0000a08c, 0x02040400},
- {0x0000a090, 0x02050203},
- {0x0000a094, 0x02050204},
- {0x0000a098, 0x02050205},
- {0x0000a09c, 0x02040500},
- {0x0000a0a0, 0x02050301},
- {0x0000a0a4, 0x02050302},
- {0x0000a0a8, 0x02050303},
- {0x0000a0ac, 0x02050400},
- {0x0000a0b0, 0x02050401},
- {0x0000a0b4, 0x02050402},
- {0x0000a0b8, 0x02050403},
- {0x0000a0bc, 0x02050500},
- {0x0000a0c0, 0x02050501},
- {0x0000a0c4, 0x02050502},
- {0x0000a0c8, 0x02050503},
- {0x0000a0cc, 0x02050504},
- {0x0000a0d0, 0x02050600},
- {0x0000a0d4, 0x02050601},
- {0x0000a0d8, 0x02050602},
- {0x0000a0dc, 0x02050603},
- {0x0000a0e0, 0x02050604},
- {0x0000a0e4, 0x02050700},
- {0x0000a0e8, 0x02050701},
- {0x0000a0ec, 0x02050702},
- {0x0000a0f0, 0x02050703},
- {0x0000a0f4, 0x02050704},
- {0x0000a0f8, 0x02050705},
- {0x0000a0fc, 0x02050708},
- {0x0000a100, 0x02050709},
- {0x0000a104, 0x0205070a},
- {0x0000a108, 0x0205070b},
- {0x0000a10c, 0x0205070c},
- {0x0000a110, 0x0205070d},
- {0x0000a114, 0x02050710},
- {0x0000a118, 0x02050711},
- {0x0000a11c, 0x02050712},
- {0x0000a120, 0x02050713},
- {0x0000a124, 0x02050714},
- {0x0000a128, 0x02050715},
- {0x0000a12c, 0x02050730},
- {0x0000a130, 0x02050731},
- {0x0000a134, 0x02050732},
- {0x0000a138, 0x02050733},
- {0x0000a13c, 0x02050734},
- {0x0000a140, 0x02050735},
- {0x0000a144, 0x02050750},
- {0x0000a148, 0x02050751},
- {0x0000a14c, 0x02050752},
- {0x0000a150, 0x02050753},
- {0x0000a154, 0x02050754},
- {0x0000a158, 0x02050755},
- {0x0000a15c, 0x02050770},
- {0x0000a160, 0x02050771},
- {0x0000a164, 0x02050772},
- {0x0000a168, 0x02050773},
- {0x0000a16c, 0x02050774},
- {0x0000a170, 0x02050775},
- {0x0000a174, 0x00000776},
- {0x0000a178, 0x00000776},
- {0x0000a17c, 0x00000776},
- {0x0000a180, 0x00000776},
- {0x0000a184, 0x00000776},
- {0x0000a188, 0x00000776},
- {0x0000a18c, 0x00000776},
- {0x0000a190, 0x00000776},
- {0x0000a194, 0x00000776},
- {0x0000a198, 0x00000776},
- {0x0000a19c, 0x00000776},
- {0x0000a1a0, 0x00000776},
- {0x0000a1a4, 0x00000776},
- {0x0000a1a8, 0x00000776},
- {0x0000a1ac, 0x00000776},
- {0x0000a1b0, 0x00000776},
- {0x0000a1b4, 0x00000776},
- {0x0000a1b8, 0x00000776},
- {0x0000a1bc, 0x00000776},
- {0x0000a1c0, 0x00000776},
- {0x0000a1c4, 0x00000776},
- {0x0000a1c8, 0x00000776},
- {0x0000a1cc, 0x00000776},
- {0x0000a1d0, 0x00000776},
- {0x0000a1d4, 0x00000776},
- {0x0000a1d8, 0x00000776},
- {0x0000a1dc, 0x00000776},
- {0x0000a1e0, 0x00000776},
- {0x0000a1e4, 0x00000776},
- {0x0000a1e8, 0x00000776},
- {0x0000a1ec, 0x00000776},
- {0x0000a1f0, 0x00000776},
- {0x0000a1f4, 0x00000776},
- {0x0000a1f8, 0x00000776},
- {0x0000a1fc, 0x00000776},
- {0x0000b000, 0x02000101},
- {0x0000b004, 0x02000102},
- {0x0000b008, 0x02000103},
- {0x0000b00c, 0x02000104},
- {0x0000b010, 0x02000200},
- {0x0000b014, 0x02000201},
- {0x0000b018, 0x02000202},
- {0x0000b01c, 0x02000203},
- {0x0000b020, 0x02000204},
- {0x0000b024, 0x02000205},
- {0x0000b028, 0x02000208},
- {0x0000b02c, 0x02000302},
- {0x0000b030, 0x02000303},
- {0x0000b034, 0x02000304},
- {0x0000b038, 0x02000400},
- {0x0000b03c, 0x02010300},
- {0x0000b040, 0x02010301},
- {0x0000b044, 0x02010302},
- {0x0000b048, 0x02000500},
- {0x0000b04c, 0x02010400},
- {0x0000b050, 0x02020300},
- {0x0000b054, 0x02020301},
- {0x0000b058, 0x02020302},
- {0x0000b05c, 0x02020303},
- {0x0000b060, 0x02020400},
- {0x0000b064, 0x02030300},
- {0x0000b068, 0x02030301},
- {0x0000b06c, 0x02030302},
- {0x0000b070, 0x02030303},
- {0x0000b074, 0x02030400},
- {0x0000b078, 0x02040300},
- {0x0000b07c, 0x02040301},
- {0x0000b080, 0x02040302},
- {0x0000b084, 0x02040303},
- {0x0000b088, 0x02030500},
- {0x0000b08c, 0x02040400},
- {0x0000b090, 0x02050203},
- {0x0000b094, 0x02050204},
- {0x0000b098, 0x02050205},
- {0x0000b09c, 0x02040500},
- {0x0000b0a0, 0x02050301},
- {0x0000b0a4, 0x02050302},
- {0x0000b0a8, 0x02050303},
- {0x0000b0ac, 0x02050400},
- {0x0000b0b0, 0x02050401},
- {0x0000b0b4, 0x02050402},
- {0x0000b0b8, 0x02050403},
- {0x0000b0bc, 0x02050500},
- {0x0000b0c0, 0x02050501},
- {0x0000b0c4, 0x02050502},
- {0x0000b0c8, 0x02050503},
- {0x0000b0cc, 0x02050504},
- {0x0000b0d0, 0x02050600},
- {0x0000b0d4, 0x02050601},
- {0x0000b0d8, 0x02050602},
- {0x0000b0dc, 0x02050603},
- {0x0000b0e0, 0x02050604},
- {0x0000b0e4, 0x02050700},
- {0x0000b0e8, 0x02050701},
- {0x0000b0ec, 0x02050702},
- {0x0000b0f0, 0x02050703},
- {0x0000b0f4, 0x02050704},
- {0x0000b0f8, 0x02050705},
- {0x0000b0fc, 0x02050708},
- {0x0000b100, 0x02050709},
- {0x0000b104, 0x0205070a},
- {0x0000b108, 0x0205070b},
- {0x0000b10c, 0x0205070c},
- {0x0000b110, 0x0205070d},
- {0x0000b114, 0x02050710},
- {0x0000b118, 0x02050711},
- {0x0000b11c, 0x02050712},
- {0x0000b120, 0x02050713},
- {0x0000b124, 0x02050714},
- {0x0000b128, 0x02050715},
- {0x0000b12c, 0x02050730},
- {0x0000b130, 0x02050731},
- {0x0000b134, 0x02050732},
- {0x0000b138, 0x02050733},
- {0x0000b13c, 0x02050734},
- {0x0000b140, 0x02050735},
- {0x0000b144, 0x02050750},
- {0x0000b148, 0x02050751},
- {0x0000b14c, 0x02050752},
- {0x0000b150, 0x02050753},
- {0x0000b154, 0x02050754},
- {0x0000b158, 0x02050755},
- {0x0000b15c, 0x02050770},
- {0x0000b160, 0x02050771},
- {0x0000b164, 0x02050772},
- {0x0000b168, 0x02050773},
- {0x0000b16c, 0x02050774},
- {0x0000b170, 0x02050775},
- {0x0000b174, 0x00000776},
- {0x0000b178, 0x00000776},
- {0x0000b17c, 0x00000776},
- {0x0000b180, 0x00000776},
- {0x0000b184, 0x00000776},
- {0x0000b188, 0x00000776},
- {0x0000b18c, 0x00000776},
- {0x0000b190, 0x00000776},
- {0x0000b194, 0x00000776},
- {0x0000b198, 0x00000776},
- {0x0000b19c, 0x00000776},
- {0x0000b1a0, 0x00000776},
- {0x0000b1a4, 0x00000776},
- {0x0000b1a8, 0x00000776},
- {0x0000b1ac, 0x00000776},
- {0x0000b1b0, 0x00000776},
- {0x0000b1b4, 0x00000776},
- {0x0000b1b8, 0x00000776},
- {0x0000b1bc, 0x00000776},
- {0x0000b1c0, 0x00000776},
- {0x0000b1c4, 0x00000776},
- {0x0000b1c8, 0x00000776},
- {0x0000b1cc, 0x00000776},
- {0x0000b1d0, 0x00000776},
- {0x0000b1d4, 0x00000776},
- {0x0000b1d8, 0x00000776},
- {0x0000b1dc, 0x00000776},
- {0x0000b1e0, 0x00000776},
- {0x0000b1e4, 0x00000776},
- {0x0000b1e8, 0x00000776},
- {0x0000b1ec, 0x00000776},
- {0x0000b1f0, 0x00000776},
- {0x0000b1f4, 0x00000776},
- {0x0000b1f8, 0x00000776},
- {0x0000b1fc, 0x00000776},
-};
-
-static const u32 ar9200_ar9280_2p0_radio_core[][2] = {
- /* Addr allmodes */
- {0x00007800, 0x00040000},
- {0x00007804, 0xdb005012},
- {0x00007808, 0x04924914},
- {0x0000780c, 0x21084210},
- {0x00007810, 0x6d801300},
- {0x00007814, 0x0019beff},
- {0x00007818, 0x07e41000},
- {0x0000781c, 0x00392000},
- {0x00007820, 0x92592480},
- {0x00007824, 0x00040000},
- {0x00007828, 0xdb005012},
- {0x0000782c, 0x04924914},
- {0x00007830, 0x21084210},
- {0x00007834, 0x6d801300},
- {0x00007838, 0x0019beff},
- {0x0000783c, 0x07e40000},
- {0x00007840, 0x00392000},
- {0x00007844, 0x92592480},
- {0x00007848, 0x00100000},
- {0x0000784c, 0x773f0567},
- {0x00007850, 0x54214514},
- {0x00007854, 0x12035828},
- {0x00007858, 0x92592692},
- {0x0000785c, 0x00000000},
- {0x00007860, 0x56400000},
- {0x00007864, 0x0a8e370e},
- {0x00007868, 0xc0102850},
- {0x0000786c, 0x812d4000},
- {0x00007870, 0x807ec400},
- {0x00007874, 0x001b6db0},
- {0x00007878, 0x00376b63},
- {0x0000787c, 0x06db6db6},
- {0x00007880, 0x006d8000},
- {0x00007884, 0xffeffffe},
- {0x00007888, 0xffeffffe},
- {0x0000788c, 0x00010000},
- {0x00007890, 0x02060aeb},
- {0x00007894, 0x5a108000},
-};
-
-static const u32 ar9462_2p0_mac_postamble_emulation[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00008014, 0x10f810f8, 0x10f810f8, 0x10f810f8, 0x10f810f8},
- {0x0000801c, 0x0e8d8017, 0x0e8d8017, 0x0e8d8017, 0x0e8d8017},
-};
-
-static const u32 ar9462_2p0_radio_postamble_sys3ant[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
- {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
- {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
-};
-
-static const u32 ar9462_2p0_baseband_postamble_emulation[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00009e3c, 0xcf946221, 0xcf946221, 0xcf946221, 0xcf946221},
- {0x00009e44, 0xfc5c0000, 0xfc5c0000, 0xfc5c0000, 0xfc5c0000},
- {0x0000a258, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
- {0x0000a25c, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x0000a28c, 0x00011111, 0x00011111, 0x00011111, 0x00011111},
- {0x0000a2c4, 0x00148d18, 0x00148d18, 0x00148d20, 0x00148d20},
- {0x0000a2d8, 0xf999a800, 0xf999a800, 0xf999a80c, 0xf999a80c},
- {0x0000a50c, 0x0000c00a, 0x0000c00a, 0x0000c00a, 0x0000c00a},
- {0x0000a538, 0x00038e8c, 0x00038e8c, 0x00038e8c, 0x00038e8c},
- {0x0000a53c, 0x0003cecc, 0x0003cecc, 0x0003cecc, 0x0003cecc},
- {0x0000a540, 0x00040ed4, 0x00040ed4, 0x00040ed4, 0x00040ed4},
- {0x0000a544, 0x00044edc, 0x00044edc, 0x00044edc, 0x00044edc},
- {0x0000a548, 0x00048ede, 0x00048ede, 0x00048ede, 0x00048ede},
- {0x0000a54c, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e},
- {0x0000a550, 0x00050f5e, 0x00050f5e, 0x00050f5e, 0x00050f5e},
- {0x0000a554, 0x00054f9e, 0x00054f9e, 0x00054f9e, 0x00054f9e},
- {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-};
-
static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
@@ -1356,24 +1005,6 @@ static const u32 ar9462_2p0_radio_core[][2] = {
{0x00016548, 0x000080c0},
};
-static const u32 ar9462_2p0_tx_gain_table_baseband_postamble_emulation[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a410, 0x000000d5, 0x000000d5, 0x000000d5, 0x000000d5},
- {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a504, 0x00004002, 0x00004002, 0x00004002, 0x00004002},
- {0x0000a508, 0x00008004, 0x00008004, 0x00008004, 0x00008004},
- {0x0000a510, 0x0001000c, 0x0001000c, 0x0001000c, 0x0001000c},
- {0x0000a514, 0x0001420b, 0x0001420b, 0x0001420b, 0x0001420b},
- {0x0000a518, 0x0001824a, 0x0001824a, 0x0001824a, 0x0001824a},
- {0x0000a51c, 0x0001c44a, 0x0001c44a, 0x0001c44a, 0x0001c44a},
- {0x0000a520, 0x0002064a, 0x0002064a, 0x0002064a, 0x0002064a},
- {0x0000a524, 0x0002484a, 0x0002484a, 0x0002484a, 0x0002484a},
- {0x0000a528, 0x00028a4a, 0x00028a4a, 0x00028a4a, 0x00028a4a},
- {0x0000a52c, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a},
- {0x0000a530, 0x00030e4a, 0x00030e4a, 0x00030e4a, 0x00030e4a},
- {0x0000a534, 0x00034e8a, 0x00034e8a, 0x00034e8a, 0x00034e8a},
-};
-
static const u32 ar9462_2p0_soc_preamble[][2] = {
/* Addr allmodes */
{0x00007020, 0x00000000},
@@ -1381,11 +1012,6 @@ static const u32 ar9462_2p0_soc_preamble[][2] = {
{0x00007038, 0x000004c2},
};
-static const u32 ar9462_2p0_sys2ant[][2] = {
- /* Addr allmodes */
- {0x00063120, 0x00801980},
-};
-
static const u32 ar9462_2p0_mac_core[][2] = {
/* Addr allmodes */
{0x00000008, 0x00000000},
@@ -1822,75 +1448,6 @@ static const u32 ar9462_common_mixed_rx_gain_table_2p0[][2] = {
{0x0000b1fc, 0x00000196},
};
-static const u32 ar9462_modes_green_ob_db_tx_gain_table_2p0[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
- {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
- {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
- {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
- {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
- {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
- {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
- {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
- {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
- {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
- {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
- {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
- {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
- {0x00016054, 0x6db60180, 0x6db60180, 0x6db60180, 0x6db60180},
- {0x00016444, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
- {0x00016448, 0x8db49000, 0x8db49000, 0x8db49000, 0x8db49000},
- {0x00016454, 0x6db60180, 0x6db60180, 0x6db60180, 0x6db60180},
-};
-
static const u32 ar9462_2p0_BTCOEX_MAX_TXPWR_table[][2] = {
/* Addr allmodes */
{0x000018c0, 0x10101010},
@@ -1903,26 +1460,4 @@ static const u32 ar9462_2p0_BTCOEX_MAX_TXPWR_table[][2] = {
{0x000018dc, 0x10101010},
};
-static const u32 ar9462_2p0_baseband_core_emulation[][2] = {
- /* Addr allmodes */
- {0x00009800, 0xafa68e30},
- {0x00009884, 0x00002842},
- {0x00009c04, 0xff55ff55},
- {0x00009c08, 0x0320ff55},
- {0x00009e50, 0x00000000},
- {0x00009fcc, 0x00000014},
- {0x0000a344, 0x00000010},
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x71733d01},
- {0x0000a3a0, 0xd0ad5c12},
- {0x0000a3c0, 0x22222220},
- {0x0000a3c4, 0x22222222},
- {0x0000a404, 0x00418a11},
- {0x0000a418, 0x050001ce},
- {0x0000a438, 0x00001800},
- {0x0000a458, 0x01444452},
- {0x0000a644, 0x3fad9d74},
- {0x0000a690, 0x00000038},
-};
-
#endif /* INITVALS_9462_2P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 171ccf7c972f..8c84049682ab 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -299,7 +299,6 @@ struct ath_tx {
struct ath_rx_edma {
struct sk_buff_head rx_fifo;
- struct sk_buff_head rx_buffers;
u32 rx_fifo_hwsize;
};
@@ -454,9 +453,39 @@ struct ath_btcoex {
struct ath_mci_profile mci;
};
-int ath_init_btcoex_timer(struct ath_softc *sc);
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+int ath9k_init_btcoex(struct ath_softc *sc);
+void ath9k_deinit_btcoex(struct ath_softc *sc);
+void ath9k_start_btcoex(struct ath_softc *sc);
+void ath9k_stop_btcoex(struct ath_softc *sc);
void ath9k_btcoex_timer_resume(struct ath_softc *sc);
void ath9k_btcoex_timer_pause(struct ath_softc *sc);
+void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status);
+u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen);
+#else
+static inline int ath9k_init_btcoex(struct ath_softc *sc)
+{
+ return 0;
+}
+static inline void ath9k_deinit_btcoex(struct ath_softc *sc)
+{
+}
+static inline void ath9k_start_btcoex(struct ath_softc *sc)
+{
+}
+static inline void ath9k_stop_btcoex(struct ath_softc *sc)
+{
+}
+static inline void ath9k_btcoex_handle_interrupt(struct ath_softc *sc,
+ u32 status)
+{
+}
+static inline u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc,
+ u32 max_4ms_framelen)
+{
+ return 0;
+}
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
/********************/
/* LED Control */
@@ -554,19 +583,13 @@ struct ath_ant_comb {
#define SC_OP_INVALID BIT(0)
#define SC_OP_BEACONS BIT(1)
-#define SC_OP_RXAGGR BIT(2)
-#define SC_OP_TXAGGR BIT(3)
-#define SC_OP_OFFCHANNEL BIT(4)
-#define SC_OP_PREAMBLE_SHORT BIT(5)
-#define SC_OP_PROTECT_ENABLE BIT(6)
-#define SC_OP_RXFLUSH BIT(7)
-#define SC_OP_LED_ASSOCIATED BIT(8)
-#define SC_OP_LED_ON BIT(9)
-#define SC_OP_TSF_RESET BIT(11)
-#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
-#define SC_OP_BT_SCAN BIT(13)
-#define SC_OP_ANI_RUN BIT(14)
-#define SC_OP_PRIM_STA_VIF BIT(15)
+#define SC_OP_OFFCHANNEL BIT(2)
+#define SC_OP_RXFLUSH BIT(3)
+#define SC_OP_TSF_RESET BIT(4)
+#define SC_OP_BT_PRIORITY_DETECTED BIT(5)
+#define SC_OP_BT_SCAN BIT(6)
+#define SC_OP_ANI_RUN BIT(7)
+#define SC_OP_PRIM_STA_VIF BIT(8)
/* Powersave flags */
#define PS_WAIT_FOR_BEACON BIT(0)
@@ -588,15 +611,12 @@ struct ath9k_vif_iter_data {
int nstations; /* number of station vifs */
int nwds; /* number of WDS vifs */
int nadhocs; /* number of adhoc vifs */
- int nothers; /* number of vifs not specified above. */
};
struct ath_softc {
struct ieee80211_hw *hw;
struct device *dev;
- int chan_idx;
- int chan_is_ht;
struct survey_info *cur_survey;
struct survey_info survey[ATH9K_NUM_CHANNELS];
@@ -650,8 +670,11 @@ struct ath_softc {
struct ath_beacon_config cur_beacon_conf;
struct delayed_work tx_complete_work;
struct delayed_work hw_pll_work;
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
struct ath_btcoex btcoex;
struct ath_mci_coex mci_coex;
+#endif
struct ath_descdma txsdma;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index b8967e482e6e..626418222c85 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -67,7 +67,7 @@ int ath_beaconq_config(struct ath_softc *sc)
* up rate codes, and channel flags. Beacons are always sent out at the
* lowest rate, and are not retried.
*/
-static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
+static void ath_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
struct ath_buf *bf, int rateidx)
{
struct sk_buff *skb = bf->bf_mpdu;
@@ -82,7 +82,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
sband = &sc->sbands[common->hw->conf.channel->band];
rate = sband->bitrates[rateidx].hw_value;
- if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
+ if (vif->bss_conf.use_short_preamble)
rate |= sband->bitrates[rateidx].hw_value_short;
memset(&info, 0, sizeof(info));
@@ -91,7 +91,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
info.txpower = MAX_RATE_POWER;
info.keyix = ATH9K_TXKEYIX_INVALID;
info.keytype = ATH9K_KEY_TYPE_CLEAR;
- info.flags = ATH9K_TXDESC_NOACK;
+ info.flags = ATH9K_TXDESC_NOACK | ATH9K_TXDESC_INTREQ;
info.buf_addr[0] = bf->bf_buf_addr;
info.buf_len[0] = roundup(skb->len, 4);
@@ -209,7 +209,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
}
}
- ath_beacon_setup(sc, avp, bf, info->control.rates[0].idx);
+ ath_beacon_setup(sc, vif, bf, info->control.rates[0].idx);
while (skb) {
ath_tx_cabq(hw, skb);
@@ -355,7 +355,6 @@ void ath_beacon_tasklet(unsigned long data)
struct ath_common *common = ath9k_hw_common(ah);
struct ath_buf *bf = NULL;
struct ieee80211_vif *vif;
- struct ath_tx_status ts;
bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int slot;
u32 bfaddr, bc = 0;
@@ -462,11 +461,6 @@ void ath_beacon_tasklet(unsigned long data)
ath9k_hw_txstart(ah, sc->beacon.beaconq);
sc->beacon.ast_be_xmit += bc; /* XXX per-vif? */
- if (edma) {
- spin_lock_bh(&sc->sc_pcu_lock);
- ath9k_hw_txprocdesc(ah, bf->bf_desc, (void *)&ts);
- spin_unlock_bh(&sc->sc_pcu_lock);
- }
}
}
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index a6712a95d76a..ec3271993411 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -68,9 +68,6 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
u32 i, idx;
bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
- return;
-
if (AR_SREV_9300_20_OR_LATER(ah))
rxclear_polarity = !ath_bt_config.bt_rxclear_polarity;
@@ -98,12 +95,43 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
}
EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
-void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah)
+void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
{
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ /*
+ * Check if BTCOEX is globally disabled.
+ */
+ if (!common->btcoex_enabled) {
+ btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE;
return;
+ }
+
+ if (AR_SREV_9462(ah)) {
+ btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
+ } else if (AR_SREV_9300_20_OR_LATER(ah)) {
+ btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
+ btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
+ btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
+ btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9300;
+ } else if (AR_SREV_9280_20_OR_LATER(ah)) {
+ btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9280;
+ btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9280;
+
+ if (AR_SREV_9285(ah)) {
+ btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
+ btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9285;
+ } else {
+ btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE;
+ }
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_init_scheme);
+
+void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah)
+{
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
/* connect bt_active to baseband */
REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL,
@@ -127,9 +155,6 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
- return;
-
/* btcoex 3-wire */
REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
(AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB |
@@ -152,13 +177,34 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_btcoex_init_3wire);
+void ath9k_hw_btcoex_init_mci(struct ath_hw *ah)
+{
+ ah->btcoex_hw.mci.ready = false;
+ ah->btcoex_hw.mci.bt_state = 0;
+ ah->btcoex_hw.mci.bt_ver_major = 3;
+ ah->btcoex_hw.mci.bt_ver_minor = 0;
+ ah->btcoex_hw.mci.bt_version_known = false;
+ ah->btcoex_hw.mci.update_2g5g = true;
+ ah->btcoex_hw.mci.is_2g = true;
+ ah->btcoex_hw.mci.wlan_channels_update = false;
+ ah->btcoex_hw.mci.wlan_channels[0] = 0x00000000;
+ ah->btcoex_hw.mci.wlan_channels[1] = 0xffffffff;
+ ah->btcoex_hw.mci.wlan_channels[2] = 0xffffffff;
+ ah->btcoex_hw.mci.wlan_channels[3] = 0x7fffffff;
+ ah->btcoex_hw.mci.query_bt = true;
+ ah->btcoex_hw.mci.unhalt_bt_gpm = true;
+ ah->btcoex_hw.mci.halted_bt_gpm = false;
+ ah->btcoex_hw.mci.need_flush_btinfo = false;
+ ah->btcoex_hw.mci.wlan_cal_seq = 0;
+ ah->btcoex_hw.mci.wlan_cal_done = 0;
+ ah->btcoex_hw.mci.config = 0x2201;
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_init_mci);
+
static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
- return;
-
/* Configure the desired GPIO port for TX_FRAME output */
ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
@@ -170,9 +216,6 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
- return;
-
btcoex_hw->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) |
SM(wlan_weight, AR_BTCOEX_WL_WGHT);
}
@@ -261,9 +304,6 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
int i;
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
- return;
-
btcoex_hw->enabled = false;
if (btcoex_hw->scheme == ATH_BTCOEX_CFG_MCI) {
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
@@ -312,9 +352,6 @@ static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
enum ath_stomp_type stomp_type)
{
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
- return;
-
if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_btcoex_bt_stomp(ah, stomp_type);
return;
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 278361c867ca..8f93aef4414f 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -67,7 +67,6 @@ struct ath9k_hw_mci {
u32 wlan_cal_done;
u32 config;
u8 *gpm_buf;
- u8 *sched_buf;
bool ready;
bool update_2g5g;
bool is_2g;
@@ -98,13 +97,14 @@ struct ath_btcoex_hw {
u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
};
+void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah);
void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah);
+void ath9k_hw_btcoex_init_mci(struct ath_hw *ah);
void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
u32 bt_weight,
u32 wlan_weight);
-void ath9k_hw_btcoex_enable(struct ath_hw *ah);
void ath9k_hw_btcoex_disable(struct ath_hw *ah);
void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
enum ath_stomp_type stomp_type);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 68d972bf232d..35d1c8e91d1c 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -451,109 +451,6 @@ static const struct file_operations fops_interrupt = {
.llseek = default_llseek,
};
-static const char *channel_type_str(enum nl80211_channel_type t)
-{
- switch (t) {
- case NL80211_CHAN_NO_HT:
- return "no ht";
- case NL80211_CHAN_HT20:
- return "ht20";
- case NL80211_CHAN_HT40MINUS:
- return "ht40-";
- case NL80211_CHAN_HT40PLUS:
- return "ht40+";
- default:
- return "???";
- }
-}
-
-static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ieee80211_channel *chan = sc->hw->conf.channel;
- struct ieee80211_conf *conf = &(sc->hw->conf);
- char buf[512];
- unsigned int len = 0;
- u8 addr[ETH_ALEN];
- u32 tmp;
-
- len += snprintf(buf + len, sizeof(buf) - len,
- "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
- wiphy_name(sc->hw->wiphy),
- ieee80211_frequency_to_channel(chan->center_freq),
- chan->center_freq,
- conf->channel_type,
- channel_type_str(conf->channel_type));
-
- ath9k_ps_wakeup(sc);
- put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr);
- put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4);
- len += snprintf(buf + len, sizeof(buf) - len,
- "addr: %pM\n", addr);
- put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_BSSMSKL), addr);
- put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_BSSMSKU) & 0xffff, addr + 4);
- len += snprintf(buf + len, sizeof(buf) - len,
- "addrmask: %pM\n", addr);
- tmp = ath9k_hw_getrxfilter(sc->sc_ah);
- ath9k_ps_restore(sc);
- len += snprintf(buf + len, sizeof(buf) - len,
- "rfilt: 0x%x", tmp);
- if (tmp & ATH9K_RX_FILTER_UCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
- if (tmp & ATH9K_RX_FILTER_MCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
- if (tmp & ATH9K_RX_FILTER_BCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " BCAST");
- if (tmp & ATH9K_RX_FILTER_CONTROL)
- len += snprintf(buf + len, sizeof(buf) - len, " CONTROL");
- if (tmp & ATH9K_RX_FILTER_BEACON)
- len += snprintf(buf + len, sizeof(buf) - len, " BEACON");
- if (tmp & ATH9K_RX_FILTER_PROM)
- len += snprintf(buf + len, sizeof(buf) - len, " PROM");
- if (tmp & ATH9K_RX_FILTER_PROBEREQ)
- len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
- if (tmp & ATH9K_RX_FILTER_PHYERR)
- len += snprintf(buf + len, sizeof(buf) - len, " PHYERR");
- if (tmp & ATH9K_RX_FILTER_MYBEACON)
- len += snprintf(buf + len, sizeof(buf) - len, " MYBEACON");
- if (tmp & ATH9K_RX_FILTER_COMP_BAR)
- len += snprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
- if (tmp & ATH9K_RX_FILTER_PSPOLL)
- len += snprintf(buf + len, sizeof(buf) - len, " PSPOLL");
- if (tmp & ATH9K_RX_FILTER_PHYRADAR)
- len += snprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
- if (tmp & ATH9K_RX_FILTER_MCAST_BCAST_ALL)
- len += snprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
-
- len += snprintf(buf + len, sizeof(buf) - len,
- "\n\nReset causes:\n"
- " baseband hang: %d\n"
- " baseband watchdog: %d\n"
- " fatal hardware error interrupt: %d\n"
- " tx hardware error: %d\n"
- " tx path hang: %d\n"
- " pll rx hang: %d\n",
- sc->debug.stats.reset[RESET_TYPE_BB_HANG],
- sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG],
- sc->debug.stats.reset[RESET_TYPE_FATAL_INT],
- sc->debug.stats.reset[RESET_TYPE_TX_ERROR],
- sc->debug.stats.reset[RESET_TYPE_TX_HANG],
- sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
-
- if (len > sizeof(buf))
- len = sizeof(buf);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static const struct file_operations fops_wiphy = {
- .read = read_file_wiphy,
- .open = ath9k_debugfs_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
#define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum
#define PR(str, elem) \
do { \
@@ -763,87 +660,128 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
{
struct ath_softc *sc = file->private_data;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_hw *ah = sc->sc_ah;
struct ieee80211_hw *hw = sc->hw;
- char *buf;
- unsigned int len = 0, size = 8000;
+ struct ath9k_vif_iter_data iter_data;
+ char buf[512];
+ unsigned int len = 0;
ssize_t retval = 0;
unsigned int reg;
- struct ath9k_vif_iter_data iter_data;
+ u32 rxfilter;
- ath9k_calculate_iter_data(hw, NULL, &iter_data);
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "BSSID: %pM\n", common->curbssid);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "BSSID-MASK: %pM\n", common->bssidmask);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "OPMODE: %s\n", ath_opmode_to_string(sc->sc_ah->opmode));
ath9k_ps_wakeup(sc);
- len += snprintf(buf + len, size - len,
- "curbssid: %pM\n"
- "OP-Mode: %s(%i)\n"
- "Beacon-Timer-Register: 0x%x\n",
- common->curbssid,
- ath_opmode_to_string(sc->sc_ah->opmode),
- (int)(sc->sc_ah->opmode),
- REG_READ(ah, AR_BEACON_PERIOD));
-
- reg = REG_READ(ah, AR_TIMER_MODE);
+ rxfilter = ath9k_hw_getrxfilter(sc->sc_ah);
ath9k_ps_restore(sc);
- len += snprintf(buf + len, size - len, "Timer-Mode-Register: 0x%x (",
- reg);
- if (reg & AR_TBTT_TIMER_EN)
- len += snprintf(buf + len, size - len, "TBTT ");
- if (reg & AR_DBA_TIMER_EN)
- len += snprintf(buf + len, size - len, "DBA ");
- if (reg & AR_SWBA_TIMER_EN)
- len += snprintf(buf + len, size - len, "SWBA ");
- if (reg & AR_HCF_TIMER_EN)
- len += snprintf(buf + len, size - len, "HCF ");
- if (reg & AR_TIM_TIMER_EN)
- len += snprintf(buf + len, size - len, "TIM ");
- if (reg & AR_DTIM_TIMER_EN)
- len += snprintf(buf + len, size - len, "DTIM ");
- len += snprintf(buf + len, size - len, ")\n");
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "RXFILTER: 0x%x", rxfilter);
+
+ if (rxfilter & ATH9K_RX_FILTER_UCAST)
+ len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
+ if (rxfilter & ATH9K_RX_FILTER_MCAST)
+ len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
+ if (rxfilter & ATH9K_RX_FILTER_BCAST)
+ len += snprintf(buf + len, sizeof(buf) - len, " BCAST");
+ if (rxfilter & ATH9K_RX_FILTER_CONTROL)
+ len += snprintf(buf + len, sizeof(buf) - len, " CONTROL");
+ if (rxfilter & ATH9K_RX_FILTER_BEACON)
+ len += snprintf(buf + len, sizeof(buf) - len, " BEACON");
+ if (rxfilter & ATH9K_RX_FILTER_PROM)
+ len += snprintf(buf + len, sizeof(buf) - len, " PROM");
+ if (rxfilter & ATH9K_RX_FILTER_PROBEREQ)
+ len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
+ if (rxfilter & ATH9K_RX_FILTER_PHYERR)
+ len += snprintf(buf + len, sizeof(buf) - len, " PHYERR");
+ if (rxfilter & ATH9K_RX_FILTER_MYBEACON)
+ len += snprintf(buf + len, sizeof(buf) - len, " MYBEACON");
+ if (rxfilter & ATH9K_RX_FILTER_COMP_BAR)
+ len += snprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
+ if (rxfilter & ATH9K_RX_FILTER_PSPOLL)
+ len += snprintf(buf + len, sizeof(buf) - len, " PSPOLL");
+ if (rxfilter & ATH9K_RX_FILTER_PHYRADAR)
+ len += snprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
+ if (rxfilter & ATH9K_RX_FILTER_MCAST_BCAST_ALL)
+ len += snprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
+ if (rxfilter & ATH9K_RX_FILTER_CONTROL_WRAPPER)
+ len += snprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
+
+ len += snprintf(buf + len, sizeof(buf) - len, "\n");
reg = sc->sc_ah->imask;
- len += snprintf(buf + len, size - len, "imask: 0x%x (", reg);
+
+ len += snprintf(buf + len, sizeof(buf) - len, "INTERRUPT-MASK: 0x%x", reg);
+
if (reg & ATH9K_INT_SWBA)
- len += snprintf(buf + len, size - len, "SWBA ");
+ len += snprintf(buf + len, sizeof(buf) - len, " SWBA");
if (reg & ATH9K_INT_BMISS)
- len += snprintf(buf + len, size - len, "BMISS ");
+ len += snprintf(buf + len, sizeof(buf) - len, " BMISS");
if (reg & ATH9K_INT_CST)
- len += snprintf(buf + len, size - len, "CST ");
+ len += snprintf(buf + len, sizeof(buf) - len, " CST");
if (reg & ATH9K_INT_RX)
- len += snprintf(buf + len, size - len, "RX ");
+ len += snprintf(buf + len, sizeof(buf) - len, " RX");
if (reg & ATH9K_INT_RXHP)
- len += snprintf(buf + len, size - len, "RXHP ");
+ len += snprintf(buf + len, sizeof(buf) - len, " RXHP");
if (reg & ATH9K_INT_RXLP)
- len += snprintf(buf + len, size - len, "RXLP ");
+ len += snprintf(buf + len, sizeof(buf) - len, " RXLP");
if (reg & ATH9K_INT_BB_WATCHDOG)
- len += snprintf(buf + len, size - len, "BB_WATCHDOG ");
- /* there are other IRQs if one wanted to add them. */
- len += snprintf(buf + len, size - len, ")\n");
+ len += snprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
- len += snprintf(buf + len, size - len,
- "VIF Counts: AP: %i STA: %i MESH: %i WDS: %i"
- " ADHOC: %i OTHER: %i nvifs: %hi beacon-vifs: %hi\n",
+ len += snprintf(buf + len, sizeof(buf) - len, "\n");
+
+ ath9k_calculate_iter_data(hw, NULL, &iter_data);
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
+ " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
iter_data.naps, iter_data.nstations, iter_data.nmeshes,
- iter_data.nwds, iter_data.nadhocs, iter_data.nothers,
+ iter_data.nwds, iter_data.nadhocs,
sc->nvifs, sc->nbcnvifs);
- len += snprintf(buf + len, size - len,
- "Calculated-BSSID-Mask: %pM\n",
- iter_data.mask);
-
- if (len > size)
- len = size;
+ if (len > sizeof(buf))
+ len = sizeof(buf);
retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
return retval;
}
+static ssize_t read_file_reset(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[512];
+ unsigned int len = 0;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Baseband Hang",
+ sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Baseband Watchdog",
+ sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Fatal HW Error",
+ sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "TX HW error",
+ sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "TX Path Hang",
+ sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "PLL RX Hang",
+ sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, struct ath_txq *txq,
unsigned int flags)
@@ -880,6 +818,7 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
TX_STAT_INC(qnum, delim_underrun);
+#ifdef CONFIG_ATH9K_MAC_DEBUG
spin_lock(&sc->debug.samp_lock);
TX_SAMP_DBG(jiffies) = jiffies;
TX_SAMP_DBG(rssi_ctl0) = ts->ts_rssi_ctl0;
@@ -906,6 +845,7 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
sc->debug.tsidx = (sc->debug.tsidx + 1) % ATH_DBG_MAX_SAMPLES;
spin_unlock(&sc->debug.samp_lock);
+#endif
#undef TX_SAMP_DBG
}
@@ -931,16 +871,23 @@ static const struct file_operations fops_misc = {
.llseek = default_llseek,
};
+static const struct file_operations fops_reset = {
+ .read = read_file_reset,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
static ssize_t read_file_recv(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
#define PHY_ERR(s, p) \
- len += snprintf(buf + len, size - len, "%18s : %10u\n", s, \
+ len += snprintf(buf + len, size - len, "%22s : %10u\n", s, \
sc->debug.stats.rxstats.phy_err_stats[p]);
struct ath_softc *sc = file->private_data;
char *buf;
- unsigned int len = 0, size = 1400;
+ unsigned int len = 0, size = 1600;
ssize_t retval = 0;
buf = kzalloc(size, GFP_KERNEL);
@@ -948,87 +895,59 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
return -ENOMEM;
len += snprintf(buf + len, size - len,
- "%18s : %10u\n", "CRC ERR",
+ "%22s : %10u\n", "CRC ERR",
sc->debug.stats.rxstats.crc_err);
len += snprintf(buf + len, size - len,
- "%18s : %10u\n", "DECRYPT CRC ERR",
+ "%22s : %10u\n", "DECRYPT CRC ERR",
sc->debug.stats.rxstats.decrypt_crc_err);
len += snprintf(buf + len, size - len,
- "%18s : %10u\n", "PHY ERR",
+ "%22s : %10u\n", "PHY ERR",
sc->debug.stats.rxstats.phy_err);
len += snprintf(buf + len, size - len,
- "%18s : %10u\n", "MIC ERR",
+ "%22s : %10u\n", "MIC ERR",
sc->debug.stats.rxstats.mic_err);
len += snprintf(buf + len, size - len,
- "%18s : %10u\n", "PRE-DELIM CRC ERR",
+ "%22s : %10u\n", "PRE-DELIM CRC ERR",
sc->debug.stats.rxstats.pre_delim_crc_err);
len += snprintf(buf + len, size - len,
- "%18s : %10u\n", "POST-DELIM CRC ERR",
+ "%22s : %10u\n", "POST-DELIM CRC ERR",
sc->debug.stats.rxstats.post_delim_crc_err);
len += snprintf(buf + len, size - len,
- "%18s : %10u\n", "DECRYPT BUSY ERR",
+ "%22s : %10u\n", "DECRYPT BUSY ERR",
sc->debug.stats.rxstats.decrypt_busy_err);
- len += snprintf(buf + len, size - len,
- "%18s : %10d\n", "RSSI-CTL0",
- sc->debug.stats.rxstats.rs_rssi_ctl0);
-
- len += snprintf(buf + len, size - len,
- "%18s : %10d\n", "RSSI-CTL1",
- sc->debug.stats.rxstats.rs_rssi_ctl1);
-
- len += snprintf(buf + len, size - len,
- "%18s : %10d\n", "RSSI-CTL2",
- sc->debug.stats.rxstats.rs_rssi_ctl2);
-
- len += snprintf(buf + len, size - len,
- "%18s : %10d\n", "RSSI-EXT0",
- sc->debug.stats.rxstats.rs_rssi_ext0);
+ PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
+ PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
+ PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
+ PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
+ PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
+ PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
+ PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
+ PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
+ PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
+ PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
+ PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
+ PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
+ PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
+ PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
+ PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
+ PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
+ PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
+ PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
+ PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
+ PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
+ PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
+ PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
+ PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
+ PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
+ PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
+ PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
len += snprintf(buf + len, size - len,
- "%18s : %10d\n", "RSSI-EXT1",
- sc->debug.stats.rxstats.rs_rssi_ext1);
-
- len += snprintf(buf + len, size - len,
- "%18s : %10d\n", "RSSI-EXT2",
- sc->debug.stats.rxstats.rs_rssi_ext2);
-
- len += snprintf(buf + len, size - len,
- "%18s : %10d\n", "Rx Antenna",
- sc->debug.stats.rxstats.rs_antenna);
-
- PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
- PHY_ERR("TIMING", ATH9K_PHYERR_TIMING);
- PHY_ERR("PARITY", ATH9K_PHYERR_PARITY);
- PHY_ERR("RATE", ATH9K_PHYERR_RATE);
- PHY_ERR("LENGTH", ATH9K_PHYERR_LENGTH);
- PHY_ERR("RADAR", ATH9K_PHYERR_RADAR);
- PHY_ERR("SERVICE", ATH9K_PHYERR_SERVICE);
- PHY_ERR("TOR", ATH9K_PHYERR_TOR);
- PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING);
- PHY_ERR("OFDM-SIGNAL-PARITY", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
- PHY_ERR("OFDM-RATE", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
- PHY_ERR("OFDM-LENGTH", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
- PHY_ERR("OFDM-POWER-DROP", ATH9K_PHYERR_OFDM_POWER_DROP);
- PHY_ERR("OFDM-SERVICE", ATH9K_PHYERR_OFDM_SERVICE);
- PHY_ERR("OFDM-RESTART", ATH9K_PHYERR_OFDM_RESTART);
- PHY_ERR("FALSE-RADAR-EXT", ATH9K_PHYERR_FALSE_RADAR_EXT);
- PHY_ERR("CCK-TIMING", ATH9K_PHYERR_CCK_TIMING);
- PHY_ERR("CCK-HEADER-CRC", ATH9K_PHYERR_CCK_HEADER_CRC);
- PHY_ERR("CCK-RATE", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
- PHY_ERR("CCK-SERVICE", ATH9K_PHYERR_CCK_SERVICE);
- PHY_ERR("CCK-RESTART", ATH9K_PHYERR_CCK_RESTART);
- PHY_ERR("CCK-LENGTH", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
- PHY_ERR("CCK-POWER-DROP", ATH9K_PHYERR_CCK_POWER_DROP);
- PHY_ERR("HT-CRC", ATH9K_PHYERR_HT_CRC_ERROR);
- PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
- PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
-
- len += snprintf(buf + len, size - len,
- "%18s : %10u\n", "RX-Pkts-All",
+ "%22s : %10u\n", "RX-Pkts-All",
sc->debug.stats.rxstats.rx_pkts_all);
len += snprintf(buf + len, size - len,
- "%18s : %10u\n", "RX-Bytes-All",
+ "%22s : %10u\n", "RX-Bytes-All",
sc->debug.stats.rxstats.rx_bytes_all);
if (len > size)
@@ -1049,8 +968,6 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
#define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\
[sc->debug.rsidx].c)
- u32 phyerr;
-
RX_STAT_INC(rx_pkts_all);
sc->debug.stats.rxstats.rx_bytes_all += rs->rs_datalen;
@@ -1069,20 +986,11 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
if (rs->rs_status & ATH9K_RXERR_PHY) {
RX_STAT_INC(phy_err);
- phyerr = rs->rs_phyerr & 0x24;
- RX_PHY_ERR_INC(phyerr);
+ if (rs->rs_phyerr < ATH9K_PHYERR_MAX)
+ RX_PHY_ERR_INC(rs->rs_phyerr);
}
- sc->debug.stats.rxstats.rs_rssi_ctl0 = rs->rs_rssi_ctl0;
- sc->debug.stats.rxstats.rs_rssi_ctl1 = rs->rs_rssi_ctl1;
- sc->debug.stats.rxstats.rs_rssi_ctl2 = rs->rs_rssi_ctl2;
-
- sc->debug.stats.rxstats.rs_rssi_ext0 = rs->rs_rssi_ext0;
- sc->debug.stats.rxstats.rs_rssi_ext1 = rs->rs_rssi_ext1;
- sc->debug.stats.rxstats.rs_rssi_ext2 = rs->rs_rssi_ext2;
-
- sc->debug.stats.rxstats.rs_antenna = rs->rs_antenna;
-
+#ifdef CONFIG_ATH9K_MAC_DEBUG
spin_lock(&sc->debug.samp_lock);
RX_SAMP_DBG(jiffies) = jiffies;
RX_SAMP_DBG(rssi_ctl0) = rs->rs_rssi_ctl0;
@@ -1099,6 +1007,8 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
sc->debug.rsidx = (sc->debug.rsidx + 1) % ATH_DBG_MAX_SAMPLES;
spin_unlock(&sc->debug.samp_lock);
+#endif
+
#undef RX_STAT_INC
#undef RX_PHY_ERR_INC
#undef RX_SAMP_DBG
@@ -1342,6 +1252,8 @@ static const struct file_operations fops_modal_eeprom = {
.llseek = default_llseek,
};
+#ifdef CONFIG_ATH9K_MAC_DEBUG
+
void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
{
#define ATH_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].c)
@@ -1615,6 +1527,7 @@ static const struct file_operations fops_samps = {
.llseek = default_llseek,
};
+#endif
int ath9k_init_debug(struct ath_hw *ah)
{
@@ -1637,14 +1550,14 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_dma);
debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_interrupt);
- debugfs_create_file("wiphy", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- sc, &fops_wiphy);
debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_xmit);
debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_stations);
debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_misc);
+ debugfs_create_file("reset", S_IRUSR, sc->debug.debugfs_phy, sc,
+ &fops_reset);
debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_recv);
debugfs_create_file("rx_chainmask", S_IRUSR | S_IWUSR,
@@ -1668,8 +1581,10 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_base_eeprom);
debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_modal_eeprom);
+#ifdef CONFIG_ATH9K_MAC_DEBUG
debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_samps);
+#endif
debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
@@ -1677,10 +1592,5 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
- sc->debug.regidx = 0;
- memset(&sc->debug.bb_mac_samp, 0, sizeof(sc->debug.bb_mac_samp));
- sc->debug.sampidx = 0;
- sc->debug.tsidx = 0;
- sc->debug.rsidx = 0;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 776a24ada600..64fcfad467bf 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -165,13 +165,6 @@ struct ath_rx_stats {
u32 post_delim_crc_err;
u32 decrypt_busy_err;
u32 phy_err_stats[ATH9K_PHYERR_MAX];
- int8_t rs_rssi_ctl0;
- int8_t rs_rssi_ctl1;
- int8_t rs_rssi_ctl2;
- int8_t rs_rssi_ext0;
- int8_t rs_rssi_ext1;
- int8_t rs_rssi_ext2;
- u8 rs_antenna;
};
enum ath_reset_type {
@@ -235,16 +228,17 @@ struct ath9k_debug {
struct dentry *debugfs_phy;
u32 regidx;
struct ath_stats stats;
+#ifdef CONFIG_ATH9K_MAC_DEBUG
spinlock_t samp_lock;
struct ath_dbg_bb_mac_samp bb_mac_samp[ATH_DBG_MAX_SAMPLES];
u8 sampidx;
u8 tsidx;
u8 rsidx;
+#endif
};
int ath9k_init_debug(struct ath_hw *ah);
-void ath9k_debug_samp_bb_mac(struct ath_softc *sc);
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, struct ath_txq *txq,
@@ -258,10 +252,6 @@ static inline int ath9k_init_debug(struct ath_hw *ah)
return 0;
}
-static inline void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
-{
-}
-
static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
enum ath9k_int status)
{
@@ -282,4 +272,17 @@ static inline void ath_debug_stat_rx(struct ath_softc *sc,
#endif /* CONFIG_ATH9K_DEBUGFS */
+#ifdef CONFIG_ATH9K_MAC_DEBUG
+
+void ath9k_debug_samp_bb_mac(struct ath_softc *sc);
+
+#else
+
+static inline void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
+{
+}
+
+#endif
+
+
#endif /* DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 597c84e31adb..fbe23de1297f 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -110,6 +110,8 @@ void ath_start_rfkill_poll(struct ath_softc *sc)
wiphy_rfkill_start_polling(sc->hw->wiphy);
}
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
/******************/
/* BTCOEX */
/******************/
@@ -245,13 +247,10 @@ static void ath_btcoex_no_stomp_timer(void *arg)
ath9k_ps_restore(sc);
}
-int ath_init_btcoex_timer(struct ath_softc *sc)
+static int ath_init_btcoex_timer(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
- if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_NONE)
- return 0;
-
btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
btcoex->btcoex_period / 100;
@@ -284,9 +283,6 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
- return;
-
/* make sure duty cycle timer is also stopped when resuming */
if (btcoex->hw_timer_enabled)
ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
@@ -307,9 +303,6 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc)
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_hw *ah = sc->sc_ah;
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
- return;
-
del_timer_sync(&btcoex->period_timer);
if (btcoex->hw_timer_enabled)
@@ -317,3 +310,114 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc)
btcoex->hw_timer_enabled = false;
}
+
+u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
+{
+ struct ath_mci_profile *mci = &sc->btcoex.mci;
+ u16 aggr_limit = 0;
+
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
+ aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
+ else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
+ aggr_limit = min((max_4ms_framelen * 3) / 8,
+ (u32)ATH_AMPDU_LIMIT_MAX);
+
+ return aggr_limit;
+}
+
+void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
+ if (status & ATH9K_INT_GENTIMER)
+ ath_gen_timer_isr(sc->sc_ah);
+
+ if (status & ATH9K_INT_MCI)
+ ath_mci_intr(sc);
+}
+
+void ath9k_start_btcoex(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
+ !ah->btcoex_hw.enabled) {
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_LOW_WLAN_WGHT);
+ ath9k_hw_btcoex_enable(ah);
+
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
+ ath9k_btcoex_timer_resume(sc);
+ }
+}
+
+void ath9k_stop_btcoex(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ah->btcoex_hw.enabled &&
+ ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
+ ath9k_hw_btcoex_disable(ah);
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
+ ath9k_btcoex_timer_pause(sc);
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_MCI)
+ ath_mci_flush_profile(&sc->btcoex.mci);
+ }
+}
+
+void ath9k_deinit_btcoex(struct ath_softc *sc)
+{
+ if ((sc->btcoex.no_stomp_timer) &&
+ ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
+ ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
+
+ if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_MCI)
+ ath_mci_cleanup(sc);
+}
+
+int ath9k_init_btcoex(struct ath_softc *sc)
+{
+ struct ath_txq *txq;
+ struct ath_hw *ah = sc->sc_ah;
+ int r;
+
+ ath9k_hw_btcoex_init_scheme(ah);
+
+ switch (ath9k_hw_get_btcoex_scheme(sc->sc_ah)) {
+ case ATH_BTCOEX_CFG_NONE:
+ break;
+ case ATH_BTCOEX_CFG_2WIRE:
+ ath9k_hw_btcoex_init_2wire(sc->sc_ah);
+ break;
+ case ATH_BTCOEX_CFG_3WIRE:
+ ath9k_hw_btcoex_init_3wire(sc->sc_ah);
+ r = ath_init_btcoex_timer(sc);
+ if (r)
+ return -1;
+ txq = sc->tx.txq_map[WME_AC_BE];
+ ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
+ sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ break;
+ case ATH_BTCOEX_CFG_MCI:
+ sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
+ INIT_LIST_HEAD(&sc->btcoex.mci.info);
+
+ r = ath_mci_setup(sc);
+ if (r)
+ return r;
+
+ ath9k_hw_btcoex_init_mci(ah);
+
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 77c8ded8de57..424aabb2c730 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -968,8 +968,7 @@ static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
}
-static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev,
- u32 drv_info)
+static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
{
int transfer, err;
const void *data = hif_dev->firmware->data;
@@ -982,7 +981,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev,
return -ENOMEM;
while (len) {
- transfer = min_t(int, len, 4096);
+ transfer = min_t(size_t, len, 4096);
memcpy(buf, data, transfer);
err = usb_control_msg(hif_dev->udev,
@@ -1000,7 +999,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev,
}
kfree(buf);
- if (IS_AR7010_DEVICE(drv_info))
+ if (IS_AR7010_DEVICE(hif_dev->usb_device_id->driver_info))
firm_offset = AR7010_FIRMWARE_TEXT;
else
firm_offset = AR9271_FIRMWARE_TEXT;
@@ -1021,28 +1020,18 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev,
return 0;
}
-static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev, u32 drv_info)
+static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
{
- int ret, idx;
struct usb_host_interface *alt = &hif_dev->interface->altsetting[0];
struct usb_endpoint_descriptor *endp;
+ int ret, idx;
- /* Request firmware */
- ret = request_firmware(&hif_dev->firmware, hif_dev->fw_name,
- &hif_dev->udev->dev);
- if (ret) {
- dev_err(&hif_dev->udev->dev,
- "ath9k_htc: Firmware - %s not found\n", hif_dev->fw_name);
- goto err_fw_req;
- }
-
- /* Download firmware */
- ret = ath9k_hif_usb_download_fw(hif_dev, drv_info);
+ ret = ath9k_hif_usb_download_fw(hif_dev);
if (ret) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: Firmware - %s download failed\n",
hif_dev->fw_name);
- goto err_fw_download;
+ return ret;
}
/* On downloading the firmware to the target, the USB descriptor of EP4
@@ -1064,23 +1053,84 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev, u32 drv_info)
if (ret) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: Unable to allocate URBs\n");
- goto err_fw_download;
+ return ret;
}
return 0;
-
-err_fw_download:
- release_firmware(hif_dev->firmware);
-err_fw_req:
- hif_dev->firmware = NULL;
- return ret;
}
static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
{
ath9k_hif_usb_dealloc_urbs(hif_dev);
- if (hif_dev->firmware)
- release_firmware(hif_dev->firmware);
+}
+
+/*
+ * If initialization fails or the FW cannot be retrieved,
+ * detach the device.
+ */
+static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev)
+{
+ struct device *parent = hif_dev->udev->dev.parent;
+
+ complete(&hif_dev->fw_done);
+
+ if (parent)
+ device_lock(parent);
+
+ device_release_driver(&hif_dev->udev->dev);
+
+ if (parent)
+ device_unlock(parent);
+}
+
+static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
+{
+ struct hif_device_usb *hif_dev = context;
+ int ret;
+
+ if (!fw) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Failed to get firmware %s\n",
+ hif_dev->fw_name);
+ goto err_fw;
+ }
+
+ hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb,
+ &hif_dev->udev->dev);
+ if (hif_dev->htc_handle == NULL) {
+ goto err_fw;
+ }
+
+ hif_dev->firmware = fw;
+
+ /* Proceed with initialization */
+
+ ret = ath9k_hif_usb_dev_init(hif_dev);
+ if (ret)
+ goto err_dev_init;
+
+ ret = ath9k_htc_hw_init(hif_dev->htc_handle,
+ &hif_dev->interface->dev,
+ hif_dev->usb_device_id->idProduct,
+ hif_dev->udev->product,
+ hif_dev->usb_device_id->driver_info);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_htc_hw_init;
+ }
+
+ complete(&hif_dev->fw_done);
+
+ return;
+
+err_htc_hw_init:
+ ath9k_hif_usb_dev_deinit(hif_dev);
+err_dev_init:
+ ath9k_htc_hw_free(hif_dev->htc_handle);
+ release_firmware(fw);
+ hif_dev->firmware = NULL;
+err_fw:
+ ath9k_hif_usb_firmware_fail(hif_dev);
}
/*
@@ -1155,20 +1205,16 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
}
usb_get_dev(udev);
+
hif_dev->udev = udev;
hif_dev->interface = interface;
- hif_dev->device_id = id->idProduct;
+ hif_dev->usb_device_id = id;
#ifdef CONFIG_PM
udev->reset_resume = 1;
#endif
usb_set_intfdata(interface, hif_dev);
- hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb,
- &hif_dev->udev->dev);
- if (hif_dev->htc_handle == NULL) {
- ret = -ENOMEM;
- goto err_htc_hw_alloc;
- }
+ init_completion(&hif_dev->fw_done);
/* Find out which firmware to load */
@@ -1177,29 +1223,22 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
else
hif_dev->fw_name = FIRMWARE_AR9271;
- ret = ath9k_hif_usb_dev_init(hif_dev, id->driver_info);
- if (ret) {
- ret = -EINVAL;
- goto err_hif_init_usb;
- }
-
- ret = ath9k_htc_hw_init(hif_dev->htc_handle,
- &interface->dev, hif_dev->device_id,
- hif_dev->udev->product, id->driver_info);
+ ret = request_firmware_nowait(THIS_MODULE, true, hif_dev->fw_name,
+ &hif_dev->udev->dev, GFP_KERNEL,
+ hif_dev, ath9k_hif_usb_firmware_cb);
if (ret) {
- ret = -EINVAL;
- goto err_htc_hw_init;
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Async request for firmware %s failed\n",
+ hif_dev->fw_name);
+ goto err_fw_req;
}
- dev_info(&hif_dev->udev->dev, "ath9k_htc: USB layer initialized\n");
+ dev_info(&hif_dev->udev->dev, "ath9k_htc: Firmware %s requested\n",
+ hif_dev->fw_name);
return 0;
-err_htc_hw_init:
- ath9k_hif_usb_dev_deinit(hif_dev);
-err_hif_init_usb:
- ath9k_htc_hw_free(hif_dev->htc_handle);
-err_htc_hw_alloc:
+err_fw_req:
usb_set_intfdata(interface, NULL);
kfree(hif_dev);
usb_put_dev(udev);
@@ -1234,9 +1273,15 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
if (!hif_dev)
return;
- ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
- ath9k_htc_hw_free(hif_dev->htc_handle);
- ath9k_hif_usb_dev_deinit(hif_dev);
+ wait_for_completion(&hif_dev->fw_done);
+
+ if (hif_dev->firmware) {
+ ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
+ ath9k_htc_hw_free(hif_dev->htc_handle);
+ ath9k_hif_usb_dev_deinit(hif_dev);
+ release_firmware(hif_dev->firmware);
+ }
+
usb_set_intfdata(interface, NULL);
if (!unplugged && (hif_dev->flags & HIF_USB_START))
@@ -1276,8 +1321,7 @@ static int ath9k_hif_usb_resume(struct usb_interface *interface)
return ret;
if (hif_dev->firmware) {
- ret = ath9k_hif_usb_download_fw(hif_dev,
- htc_handle->drv_priv->ah->hw_version.usbdev);
+ ret = ath9k_hif_usb_download_fw(hif_dev);
if (ret)
goto fail_resume;
} else {
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 794f63094e5d..487ff658b4c1 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -87,10 +87,11 @@ struct cmd_buf {
#define HIF_USB_START BIT(0)
struct hif_device_usb {
- u16 device_id;
struct usb_device *udev;
struct usb_interface *interface;
+ const struct usb_device_id *usb_device_id;
const struct firmware *firmware;
+ struct completion fw_done;
struct htc_target *htc_handle;
struct hif_usb_tx tx;
struct usb_anchor regout_submitted;
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index da5596766d82..135795257d95 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -400,9 +400,21 @@ struct ath_btcoex {
u32 btscan_no_stomp;
};
-void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv);
-void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv);
-void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv);
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+void ath9k_htc_init_btcoex(struct ath9k_htc_priv *priv, char *product);
+void ath9k_htc_start_btcoex(struct ath9k_htc_priv *priv);
+void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv);
+#else
+static inline void ath9k_htc_init_btcoex(struct ath9k_htc_priv *priv, char *product)
+{
+}
+static inline void ath9k_htc_start_btcoex(struct ath9k_htc_priv *priv)
+{
+}
+static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
+{
+}
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
#define OP_INVALID BIT(0)
#define OP_SCANNING BIT(1)
@@ -483,7 +495,10 @@ struct ath9k_htc_priv {
int cabq;
int hwq_map[WME_NUM_AC];
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
struct ath_btcoex btcoex;
+#endif
+
struct delayed_work coex_period_work;
struct delayed_work duty_cycle_work;
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 6506e1fd5036..1c10e2e5c237 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -20,6 +20,10 @@
/* BTCOEX */
/******************/
+#define ATH_HTC_BTCOEX_PRODUCT_ID "wb193"
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
/*
* Detects if there is any priority bt traffic
*/
@@ -111,13 +115,10 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work)
ath9k_hw_btcoex_enable(priv->ah);
}
-void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv)
+static void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv)
{
struct ath_btcoex *btcoex = &priv->btcoex;
- if (ath9k_hw_get_btcoex_scheme(priv->ah) == ATH_BTCOEX_CFG_NONE)
- return;
-
btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
btcoex->btcoex_period / 100;
@@ -131,14 +132,11 @@ void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv)
* (Re)start btcoex work
*/
-void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
+static void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
{
struct ath_btcoex *btcoex = &priv->btcoex;
struct ath_hw *ah = priv->ah;
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
- return;
-
ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex work\n");
btcoex->bt_priority_cnt = 0;
@@ -151,15 +149,66 @@ void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
/*
* Cancel btcoex and bt duty cycle work.
*/
-void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv)
+static void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv)
{
- if (ath9k_hw_get_btcoex_scheme(priv->ah) == ATH_BTCOEX_CFG_NONE)
- return;
-
cancel_delayed_work_sync(&priv->coex_period_work);
cancel_delayed_work_sync(&priv->duty_cycle_work);
}
+void ath9k_htc_start_btcoex(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) {
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_LOW_WLAN_WGHT);
+ ath9k_hw_btcoex_enable(ah);
+ ath_htc_resume_btcoex_work(priv);
+ }
+}
+
+void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+
+ if (ah->btcoex_hw.enabled &&
+ ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
+ ath9k_hw_btcoex_disable(ah);
+ if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ ath_htc_cancel_btcoex_work(priv);
+ }
+}
+
+void ath9k_htc_init_btcoex(struct ath9k_htc_priv *priv, char *product)
+{
+ struct ath_hw *ah = priv->ah;
+ int qnum;
+
+ if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) {
+ ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE;
+ }
+
+ switch (ath9k_hw_get_btcoex_scheme(priv->ah)) {
+ case ATH_BTCOEX_CFG_NONE:
+ break;
+ case ATH_BTCOEX_CFG_3WIRE:
+ priv->ah->btcoex_hw.btactive_gpio = 7;
+ priv->ah->btcoex_hw.btpriority_gpio = 6;
+ priv->ah->btcoex_hw.wlanactive_gpio = 8;
+ priv->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ ath9k_hw_btcoex_init_3wire(priv->ah);
+ ath_htc_init_btcoex_work(priv);
+ qnum = priv->hwq_map[WME_AC_BE];
+ ath9k_hw_init_btcoex_hw(priv->ah, qnum);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+
/*******/
/* LED */
/*******/
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 9be10a2da1c2..de5ee15ee639 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -41,8 +41,6 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
.max_power = 20, \
}
-#define ATH_HTC_BTCOEX_PRODUCT_ID "wb193"
-
static struct ieee80211_channel ath9k_2ghz_channels[] = {
CHAN2G(2412, 0), /* Channel 1 */
CHAN2G(2417, 1), /* Channel 2 */
@@ -603,29 +601,6 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
priv->ah->opmode = NL80211_IFTYPE_STATION;
}
-static void ath9k_init_btcoex(struct ath9k_htc_priv *priv)
-{
- int qnum;
-
- switch (ath9k_hw_get_btcoex_scheme(priv->ah)) {
- case ATH_BTCOEX_CFG_NONE:
- break;
- case ATH_BTCOEX_CFG_3WIRE:
- priv->ah->btcoex_hw.btactive_gpio = 7;
- priv->ah->btcoex_hw.btpriority_gpio = 6;
- priv->ah->btcoex_hw.wlanactive_gpio = 8;
- priv->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
- ath9k_hw_btcoex_init_3wire(priv->ah);
- ath_htc_init_btcoex_work(priv);
- qnum = priv->hwq_map[WME_AC_BE];
- ath9k_hw_init_btcoex_hw(priv->ah, qnum);
- break;
- default:
- WARN_ON(1);
- break;
- }
-}
-
static int ath9k_init_priv(struct ath9k_htc_priv *priv,
u16 devid, char *product,
u32 drv_info)
@@ -698,12 +673,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
ath9k_cmn_init_crypto(ah);
ath9k_init_channels_rates(priv);
ath9k_init_misc(priv);
-
- if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) {
- ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE;
- if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE)
- ath9k_init_btcoex(priv);
- }
+ ath9k_htc_init_btcoex(priv, product);
return 0;
@@ -741,6 +711,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
hw->queues = 4;
hw->channel_change_time = 5000;
hw->max_listen_interval = 10;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index ef4c60661290..2b8f61c210e1 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -919,7 +919,6 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
/* setup initial channel */
init_channel = ath9k_cmn_get_curchannel(hw, ah);
- ath9k_hw_htc_resetinit(ah);
ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
if (ret) {
ath_err(common,
@@ -957,12 +956,8 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
mod_timer(&priv->tx.cleanup_timer,
jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) {
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_LOW_WLAN_WGHT);
- ath9k_hw_btcoex_enable(ah);
- ath_htc_resume_btcoex_work(priv);
- }
+ ath9k_htc_start_btcoex(priv);
+
mutex_unlock(&priv->mutex);
return ret;
@@ -1009,12 +1004,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
- if (ah->btcoex_hw.enabled &&
- ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
- ath9k_hw_btcoex_disable(ah);
- if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
- ath_htc_cancel_btcoex_work(priv);
- }
+ ath9k_htc_stop_btcoex(priv);
/* Remove a monitor interface if it's present. */
if (priv->ah->is_monitoring)
@@ -1409,6 +1399,21 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
if (htc_modparam_nohwcrypt)
return -ENOSPC;
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ /*
+ * For now, disable hw crypto for the RSN IBSS group keys. This
+ * could be optimized in the future to use a modified key cache
+ * design to support per-STA RX GTK, but until that gets
+ * implemented, use of software crypto for group addressed
+	 * frames is acceptable to allow RSN IBSS to be used.
+ */
+ return -EOPNOTSUPP;
+ }
+
mutex_lock(&priv->mutex);
ath_dbg(common, CONFIG, "Set HW Key\n");
ath9k_htc_ps_wakeup(priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 1b90ed8795c3..c25226a32ddc 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -431,11 +431,8 @@ struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
struct htc_target *target;
target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
- if (!target) {
- printk(KERN_ERR "Unable to allocate memory for"
- "target device\n");
+ if (!target)
return NULL;
- }
init_completion(&target->target_wait);
init_completion(&target->cmd_wait);
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index c4ad0b06bdbc..265bf77598a2 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -24,7 +24,7 @@
static inline void ath9k_hw_configpcipowersave(struct ath_hw *ah,
bool power_off)
{
- if (ah->aspm_enabled != true)
+ if (!ah->aspm_enabled)
return;
ath9k_hw_ops(ah)->config_pci_powersave(ah, power_off);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 87db1ee1c298..6c69e4e8b1cb 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -23,6 +23,7 @@
#include "hw-ops.h"
#include "rc.h"
#include "ar9003_mac.h"
+#include "ar9003_mci.h"
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
@@ -448,6 +449,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->slottime = ATH9K_SLOT_TIME_9;
ah->globaltxtimeout = (u32) -1;
ah->power_mode = ATH9K_PM_UNDEFINED;
+ ah->htc_reset_init = true;
}
static int ath9k_hw_init_macaddr(struct ath_hw *ah)
@@ -554,7 +556,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
return -EIO;
}
- if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
+ if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
!ah->is_pciexpress)) {
@@ -618,9 +620,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (!ah->is_pciexpress)
ath9k_hw_disablepcie(ah);
- if (!AR_SREV_9300_20_OR_LATER(ah))
- ar9002_hw_cck_chan14_spread(ah);
-
r = ath9k_hw_post_init(ah);
if (r)
return r;
@@ -1385,10 +1384,16 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
static bool ath9k_hw_chip_reset(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) {
- if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON))
- return false;
- } else if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
+ int reset_type = ATH9K_RESET_WARM;
+
+ if (AR_SREV_9280(ah)) {
+ if (ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+ reset_type = ATH9K_RESET_POWER_ON;
+ else
+ reset_type = ATH9K_RESET_COLD;
+ }
+
+ if (!ath9k_hw_set_reset_reg(ah, reset_type))
return false;
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
@@ -1514,70 +1519,95 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_check_alive);
-int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
- struct ath9k_hw_cal_data *caldata, bool bChannelChange)
+/*
+ * Fast channel change:
+ * (Change synthesizer based on channel freq without resetting chip)
+ *
+ * Don't do FCC when
+ * - Flag is not set
+ * - Chip is just coming out of full sleep
+ * - Channel to be set is same as current channel
+ * - Channel flags are different (e.g., moving from a 2 GHz to a 5 GHz channel)
+ */
+static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
- u32 saveLedState;
- struct ath9k_channel *curchan = ah->curchan;
- u32 saveDefAntenna;
- u32 macStaId1;
- u64 tsf = 0;
- int i, r;
- bool allow_fbs = false;
- bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
- bool save_fullsleep = ah->chip_fullsleep;
+ int ret;
- if (mci) {
+ if (AR_SREV_9280(ah) && common->bus_ops->ath_bus_type == ATH_PCI)
+ goto fail;
- ar9003_mci_2g5g_changed(ah, IS_CHAN_2GHZ(chan));
+ if (ah->chip_fullsleep)
+ goto fail;
- if (mci_hw->bt_state == MCI_BT_CAL_START) {
- u32 payload[4] = {0, 0, 0, 0};
+ if (!ah->curchan)
+ goto fail;
- ath_dbg(common, MCI, "MCI stop rx for BT CAL\n");
+ if (chan->channel == ah->curchan->channel)
+ goto fail;
- mci_hw->bt_state = MCI_BT_CAL;
+ if ((chan->channelFlags & CHANNEL_ALL) !=
+ (ah->curchan->channelFlags & CHANNEL_ALL))
+ goto fail;
- /*
- * MCI FIX: disable mci interrupt here. This is to avoid
- * SW_MSG_DONE or RX_MSG bits to trigger MCI_INT and
- * lead to mci_intr reentry.
- */
+ if (!ath9k_hw_check_alive(ah))
+ goto fail;
- ar9003_mci_disable_interrupt(ah);
+ /*
+ * For AR9462, make sure that the calibration data
+ * to be re-used is present.
+ */
+ if (AR_SREV_9462(ah) && (!ah->caldata ||
+ !ah->caldata->done_txiqcal_once ||
+ !ah->caldata->done_txclcal_once ||
+ !ah->caldata->rtt_hist.num_readings))
+ goto fail;
- ath_dbg(common, MCI, "send WLAN_CAL_GRANT\n");
- MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT);
- ar9003_mci_send_message(ah, MCI_GPM, 0, payload,
- 16, true, false);
+ ath_dbg(common, RESET, "FastChannelChange for %d -> %d\n",
+ ah->curchan->channel, chan->channel);
- ath_dbg(common, MCI, "\nMCI BT is calibrating\n");
+ ret = ath9k_hw_channel_change(ah, chan);
+ if (!ret)
+ goto fail;
- /* Wait BT calibration to be completed for 25ms */
+ ath9k_hw_loadnf(ah, ah->curchan);
+ ath9k_hw_start_nfcal(ah, true);
- if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_DONE,
- 0, 25000))
- ath_dbg(common, MCI,
- "MCI got BT_CAL_DONE\n");
- else
- ath_dbg(common, MCI,
- "MCI ### BT cal takes to long, force bt_state to be bt_awake\n");
- mci_hw->bt_state = MCI_BT_AWAKE;
- /* MCI FIX: enable mci interrupt here */
- ar9003_mci_enable_interrupt(ah);
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && ar9003_mci_is_ready(ah))
+ ar9003_mci_2g5g_switch(ah, true);
- return true;
- }
- }
+ if (AR_SREV_9271(ah))
+ ar9002_hw_load_ani_reg(ah, chan);
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
+ struct ath9k_hw_cal_data *caldata, bool fastcc)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 saveLedState;
+ u32 saveDefAntenna;
+ u32 macStaId1;
+ u64 tsf = 0;
+ int i, r;
+ bool start_mci_reset = false;
+ bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
+ bool save_fullsleep = ah->chip_fullsleep;
+ if (mci) {
+ start_mci_reset = ar9003_mci_start_reset(ah, chan);
+ if (start_mci_reset)
+ return 0;
+ }
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
- if (curchan && !ah->chip_fullsleep)
- ath9k_hw_getnf(ah, curchan);
+ if (ah->curchan && !ah->chip_fullsleep)
+ ath9k_hw_getnf(ah, ah->curchan);
ah->caldata = caldata;
if (caldata &&
@@ -1590,47 +1620,14 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
}
ah->noise = ath9k_hw_getchan_noise(ah, chan);
- if (AR_SREV_9280(ah) && common->bus_ops->ath_bus_type == ATH_PCI)
- bChannelChange = false;
-
- if (caldata &&
- caldata->done_txiqcal_once &&
- caldata->done_txclcal_once &&
- caldata->rtt_hist.num_readings)
- allow_fbs = true;
-
- if (bChannelChange &&
- (ah->chip_fullsleep != true) &&
- (ah->curchan != NULL) &&
- (chan->channel != ah->curchan->channel) &&
- (allow_fbs ||
- ((chan->channelFlags & CHANNEL_ALL) ==
- (ah->curchan->channelFlags & CHANNEL_ALL)))) {
- if (ath9k_hw_channel_change(ah, chan)) {
- ath9k_hw_loadnf(ah, ah->curchan);
- ath9k_hw_start_nfcal(ah, true);
- if (mci && mci_hw->ready)
- ar9003_mci_2g5g_switch(ah, true);
-
- if (AR_SREV_9271(ah))
- ar9002_hw_load_ani_reg(ah, chan);
- return 0;
- }
- }
-
- if (mci) {
- ar9003_mci_disable_interrupt(ah);
-
- if (mci_hw->ready && !save_fullsleep) {
- ar9003_mci_mute_bt(ah);
- udelay(20);
- REG_WRITE(ah, AR_BTCOEX_CTRL, 0);
- }
-
- mci_hw->bt_state = MCI_BT_SLEEP;
- mci_hw->ready = false;
+ if (fastcc) {
+ r = ath9k_hw_do_fastcc(ah, chan);
+ if (!r)
+ return r;
}
+ if (mci)
+ ar9003_mci_stop_bt(ah, save_fullsleep);
saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
if (saveDefAntenna == 0)
@@ -1807,53 +1804,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_loadnf(ah, chan);
ath9k_hw_start_nfcal(ah, true);
- if (mci && mci_hw->ready) {
-
- if (IS_CHAN_2GHZ(chan) &&
- (mci_hw->bt_state == MCI_BT_SLEEP)) {
-
- if (ar9003_mci_check_int(ah,
- AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) ||
- ar9003_mci_check_int(ah,
- AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) {
-
- /*
- * BT is sleeping. Check if BT wakes up during
- * WLAN calibration. If BT wakes up during
- * WLAN calibration, need to go through all
- * message exchanges again and recal.
- */
-
- ath_dbg(common, MCI,
- "MCI BT wakes up during WLAN calibration\n");
-
- REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
- AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE);
- ath_dbg(common, MCI, "MCI send REMOTE_RESET\n");
- ar9003_mci_remote_reset(ah, true);
- ar9003_mci_send_sys_waking(ah, true);
- udelay(1);
- if (IS_CHAN_2GHZ(chan))
- ar9003_mci_send_lna_transfer(ah, true);
-
- mci_hw->bt_state = MCI_BT_AWAKE;
-
- ath_dbg(common, MCI, "MCI re-cal\n");
-
- if (caldata) {
- caldata->done_txiqcal_once = false;
- caldata->done_txclcal_once = false;
- caldata->rtt_hist.num_readings = 0;
- }
-
- if (!ath9k_hw_init_cal(ah, chan))
- return -EIO;
-
- }
- }
- ar9003_mci_enable_interrupt(ah);
- }
+ if (mci && ar9003_mci_end_reset(ah, chan, caldata))
+ return -EIO;
ENABLE_REGWRITE_BUFFER(ah);
@@ -1894,24 +1846,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
#endif
}
- if (ah->btcoex_hw.enabled &&
- ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE)
+ if (ath9k_hw_btcoex_is_enabled(ah))
ath9k_hw_btcoex_enable(ah);
- if (mci && mci_hw->ready) {
- /*
- * check BT state again to make
- * sure it's not changed.
- */
-
- ar9003_mci_sync_bt_state(ah);
- ar9003_mci_2g5g_switch(ah, true);
-
- if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
- (mci_hw->query_bt == true)) {
- mci_hw->need_flush_btinfo = true;
- }
- }
+ if (mci)
+ ar9003_mci_check_bt(ah);
if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_hw_bb_watchdog_config(ah);
@@ -1962,8 +1901,7 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
/* Shutdown chip. Active low */
- if (!AR_SREV_5416(ah) &&
- !AR_SREV_9271(ah) && !AR_SREV_9462_10(ah)) {
+ if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) {
REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN);
udelay(2);
}
@@ -2038,8 +1976,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
if (setChip) {
if ((REG_READ(ah, AR_RTC_STATUS) &
AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
- if (ath9k_hw_set_reset_reg(ah,
- ATH9K_RESET_POWER_ON) != true) {
+ if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
return false;
}
if (!AR_SREV_9300_20_OR_LATER(ah))
@@ -2077,7 +2014,6 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
int status = true, setChip = true;
static const char *modes[] = {
"AWAKE",
@@ -2101,20 +2037,8 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
break;
case ATH9K_PM_FULL_SLEEP:
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
- if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) &&
- (mci->bt_state != MCI_BT_SLEEP) &&
- !mci->halted_bt_gpm) {
- ath_dbg(common, MCI,
- "MCI halt BT GPM (full_sleep)\n");
- ar9003_mci_send_coex_halt_bt_gpm(ah,
- true, true);
- }
-
- mci->ready = false;
- REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
- }
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ ar9003_mci_set_full_sleep(ah);
ath9k_set_power_sleep(ah, setChip);
ah->chip_fullsleep = true;
@@ -2304,7 +2228,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
- struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
unsigned int chip_chainmask;
u16 eeval;
@@ -2423,30 +2346,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
- if (common->btcoex_enabled) {
- if (AR_SREV_9462(ah))
- btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
- else if (AR_SREV_9300_20_OR_LATER(ah)) {
- btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
- btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
- btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
- btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9300;
- } else if (AR_SREV_9280_20_OR_LATER(ah)) {
- btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9280;
- btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9280;
-
- if (AR_SREV_9285(ah)) {
- btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
- btcoex_hw->btpriority_gpio =
- ATH_BTPRIORITY_GPIO_9285;
- } else {
- btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE;
- }
- }
- } else {
- btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE;
- }
-
if (AR_SREV_9300_20_OR_LATER(ah)) {
pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK;
if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah))
@@ -2530,8 +2429,17 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9485_OR_LATER(ah))
ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
}
- if (AR_SREV_9462(ah))
- pCap->hw_caps |= ATH9K_HW_CAP_RTT | ATH9K_HW_CAP_MCI;
+
+ if (AR_SREV_9462(ah)) {
+
+ if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE))
+ pCap->hw_caps |= ATH9K_HW_CAP_MCI;
+
+ if (AR_SREV_9462_20(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_RTT;
+
+ }
+
return 0;
}
@@ -2657,12 +2565,6 @@ void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
}
EXPORT_SYMBOL(ath9k_hw_set_gpio);
-u32 ath9k_hw_getdefantenna(struct ath_hw *ah)
-{
- return REG_READ(ah, AR_DEF_ANTENNA) & 0x7;
-}
-EXPORT_SYMBOL(ath9k_hw_getdefantenna);
-
void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
{
REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
@@ -2720,6 +2622,7 @@ bool ath9k_hw_phy_disable(struct ath_hw *ah)
return false;
ath9k_hw_init_pll(ah, NULL);
+ ah->htc_reset_init = true;
return true;
}
EXPORT_SYMBOL(ath9k_hw_phy_disable);
@@ -3080,12 +2983,6 @@ EXPORT_SYMBOL(ath_gen_timer_isr);
/* HTC */
/********/
-void ath9k_hw_htc_resetinit(struct ath_hw *ah)
-{
- ah->htc_reset_init = true;
-}
-EXPORT_SYMBOL(ath9k_hw_htc_resetinit);
-
static struct {
u32 version;
const char * name;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index c8261d4fc780..aa1680a0c7fd 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -209,11 +209,7 @@ enum ath9k_hw_caps {
ATH9K_HW_CAP_5GHZ = BIT(12),
ATH9K_HW_CAP_APM = BIT(13),
ATH9K_HW_CAP_RTT = BIT(14),
-#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
ATH9K_HW_CAP_MCI = BIT(15),
-#else
- ATH9K_HW_CAP_MCI = 0,
-#endif
ATH9K_HW_CAP_DFS = BIT(16),
};
@@ -432,161 +428,6 @@ enum ath9k_rx_qtype {
ATH9K_RX_QUEUE_MAX,
};
-enum mci_message_header { /* length of payload */
- MCI_LNA_CTRL = 0x10, /* len = 0 */
- MCI_CONT_NACK = 0x20, /* len = 0 */
- MCI_CONT_INFO = 0x30, /* len = 4 */
- MCI_CONT_RST = 0x40, /* len = 0 */
- MCI_SCHD_INFO = 0x50, /* len = 16 */
- MCI_CPU_INT = 0x60, /* len = 4 */
- MCI_SYS_WAKING = 0x70, /* len = 0 */
- MCI_GPM = 0x80, /* len = 16 */
- MCI_LNA_INFO = 0x90, /* len = 1 */
- MCI_LNA_STATE = 0x94,
- MCI_LNA_TAKE = 0x98,
- MCI_LNA_TRANS = 0x9c,
- MCI_SYS_SLEEPING = 0xa0, /* len = 0 */
- MCI_REQ_WAKE = 0xc0, /* len = 0 */
- MCI_DEBUG_16 = 0xfe, /* len = 2 */
- MCI_REMOTE_RESET = 0xff /* len = 16 */
-};
-
-enum ath_mci_gpm_coex_profile_type {
- MCI_GPM_COEX_PROFILE_UNKNOWN,
- MCI_GPM_COEX_PROFILE_RFCOMM,
- MCI_GPM_COEX_PROFILE_A2DP,
- MCI_GPM_COEX_PROFILE_HID,
- MCI_GPM_COEX_PROFILE_BNEP,
- MCI_GPM_COEX_PROFILE_VOICE,
- MCI_GPM_COEX_PROFILE_MAX
-};
-
-/* MCI GPM/Coex opcode/type definitions */
-enum {
- MCI_GPM_COEX_W_GPM_PAYLOAD = 1,
- MCI_GPM_COEX_B_GPM_TYPE = 4,
- MCI_GPM_COEX_B_GPM_OPCODE = 5,
- /* MCI_GPM_WLAN_CAL_REQ, MCI_GPM_WLAN_CAL_DONE */
- MCI_GPM_WLAN_CAL_W_SEQUENCE = 2,
-
- /* MCI_GPM_COEX_VERSION_QUERY */
- /* MCI_GPM_COEX_VERSION_RESPONSE */
- MCI_GPM_COEX_B_MAJOR_VERSION = 6,
- MCI_GPM_COEX_B_MINOR_VERSION = 7,
- /* MCI_GPM_COEX_STATUS_QUERY */
- MCI_GPM_COEX_B_BT_BITMAP = 6,
- MCI_GPM_COEX_B_WLAN_BITMAP = 7,
- /* MCI_GPM_COEX_HALT_BT_GPM */
- MCI_GPM_COEX_B_HALT_STATE = 6,
- /* MCI_GPM_COEX_WLAN_CHANNELS */
- MCI_GPM_COEX_B_CHANNEL_MAP = 6,
- /* MCI_GPM_COEX_BT_PROFILE_INFO */
- MCI_GPM_COEX_B_PROFILE_TYPE = 6,
- MCI_GPM_COEX_B_PROFILE_LINKID = 7,
- MCI_GPM_COEX_B_PROFILE_STATE = 8,
- MCI_GPM_COEX_B_PROFILE_ROLE = 9,
- MCI_GPM_COEX_B_PROFILE_RATE = 10,
- MCI_GPM_COEX_B_PROFILE_VOTYPE = 11,
- MCI_GPM_COEX_H_PROFILE_T = 12,
- MCI_GPM_COEX_B_PROFILE_W = 14,
- MCI_GPM_COEX_B_PROFILE_A = 15,
- /* MCI_GPM_COEX_BT_STATUS_UPDATE */
- MCI_GPM_COEX_B_STATUS_TYPE = 6,
- MCI_GPM_COEX_B_STATUS_LINKID = 7,
- MCI_GPM_COEX_B_STATUS_STATE = 8,
- /* MCI_GPM_COEX_BT_UPDATE_FLAGS */
- MCI_GPM_COEX_W_BT_FLAGS = 6,
- MCI_GPM_COEX_B_BT_FLAGS_OP = 10
-};
-
-enum mci_gpm_subtype {
- MCI_GPM_BT_CAL_REQ = 0,
- MCI_GPM_BT_CAL_GRANT = 1,
- MCI_GPM_BT_CAL_DONE = 2,
- MCI_GPM_WLAN_CAL_REQ = 3,
- MCI_GPM_WLAN_CAL_GRANT = 4,
- MCI_GPM_WLAN_CAL_DONE = 5,
- MCI_GPM_COEX_AGENT = 0x0c,
- MCI_GPM_RSVD_PATTERN = 0xfe,
- MCI_GPM_RSVD_PATTERN32 = 0xfefefefe,
- MCI_GPM_BT_DEBUG = 0xff
-};
-
-enum mci_bt_state {
- MCI_BT_SLEEP,
- MCI_BT_AWAKE,
- MCI_BT_CAL_START,
- MCI_BT_CAL
-};
-
-/* Type of state query */
-enum mci_state_type {
- MCI_STATE_ENABLE,
- MCI_STATE_INIT_GPM_OFFSET,
- MCI_STATE_NEXT_GPM_OFFSET,
- MCI_STATE_LAST_GPM_OFFSET,
- MCI_STATE_BT,
- MCI_STATE_SET_BT_SLEEP,
- MCI_STATE_SET_BT_AWAKE,
- MCI_STATE_SET_BT_CAL_START,
- MCI_STATE_SET_BT_CAL,
- MCI_STATE_LAST_SCHD_MSG_OFFSET,
- MCI_STATE_REMOTE_SLEEP,
- MCI_STATE_CONT_RSSI_POWER,
- MCI_STATE_CONT_PRIORITY,
- MCI_STATE_CONT_TXRX,
- MCI_STATE_RESET_REQ_WAKE,
- MCI_STATE_SEND_WLAN_COEX_VERSION,
- MCI_STATE_SET_BT_COEX_VERSION,
- MCI_STATE_SEND_WLAN_CHANNELS,
- MCI_STATE_SEND_VERSION_QUERY,
- MCI_STATE_SEND_STATUS_QUERY,
- MCI_STATE_NEED_FLUSH_BT_INFO,
- MCI_STATE_SET_CONCUR_TX_PRI,
- MCI_STATE_RECOVER_RX,
- MCI_STATE_NEED_FTP_STOMP,
- MCI_STATE_NEED_TUNING,
- MCI_STATE_DEBUG,
- MCI_STATE_MAX
-};
-
-enum mci_gpm_coex_opcode {
- MCI_GPM_COEX_VERSION_QUERY,
- MCI_GPM_COEX_VERSION_RESPONSE,
- MCI_GPM_COEX_STATUS_QUERY,
- MCI_GPM_COEX_HALT_BT_GPM,
- MCI_GPM_COEX_WLAN_CHANNELS,
- MCI_GPM_COEX_BT_PROFILE_INFO,
- MCI_GPM_COEX_BT_STATUS_UPDATE,
- MCI_GPM_COEX_BT_UPDATE_FLAGS
-};
-
-#define MCI_GPM_NOMORE 0
-#define MCI_GPM_MORE 1
-#define MCI_GPM_INVALID 0xffffffff
-
-#define MCI_GPM_RECYCLE(_p_gpm) do { \
- *(((u32 *)_p_gpm) + MCI_GPM_COEX_W_GPM_PAYLOAD) = \
- MCI_GPM_RSVD_PATTERN32; \
-} while (0)
-
-#define MCI_GPM_TYPE(_p_gpm) \
- (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) & 0xff)
-
-#define MCI_GPM_OPCODE(_p_gpm) \
- (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) & 0xff)
-
-#define MCI_GPM_SET_CAL_TYPE(_p_gpm, _cal_type) do { \
- *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_cal_type) & 0xff;\
-} while (0)
-
-#define MCI_GPM_SET_TYPE_OPCODE(_p_gpm, _type, _opcode) do { \
- *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_type) & 0xff; \
- *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) = (_opcode) & 0xff;\
-} while (0)
-
-#define MCI_GPM_IS_CAL_TYPE(_type) ((_type) <= MCI_GPM_WLAN_CAL_DONE)
-
struct ath9k_beacon_state {
u32 bs_nexttbtt;
u32 bs_nextdtim;
@@ -956,8 +797,9 @@ struct ath_hw {
int firpwr[5];
enum ath9k_ani_cmd ani_function;
- /* Bluetooth coexistance */
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
struct ath_btcoex_hw btcoex_hw;
+#endif
u32 intr_txqs;
u8 txchainmask;
@@ -985,19 +827,14 @@ struct ath_hw {
struct ar5416IniArray iniAddac;
struct ar5416IniArray iniPcieSerdes;
struct ar5416IniArray iniPcieSerdesLowPower;
- struct ar5416IniArray iniModesAdditional;
- struct ar5416IniArray iniModesAdditional_40M;
+ struct ar5416IniArray iniModesFastClock;
+ struct ar5416IniArray iniAdditional;
struct ar5416IniArray iniModesRxGain;
struct ar5416IniArray iniModesTxGain;
- struct ar5416IniArray iniModes_9271_1_0_only;
struct ar5416IniArray iniCckfirNormal;
struct ar5416IniArray iniCckfirJapan2484;
struct ar5416IniArray ini_japan2484;
- struct ar5416IniArray iniCommon_normal_cck_fir_coeff_9271;
- struct ar5416IniArray iniCommon_japan_2484_cck_fir_coeff_9271;
struct ar5416IniArray iniModes_9271_ANI_reg;
- struct ar5416IniArray iniModes_high_power_tx_gain_9271;
- struct ar5416IniArray iniModes_normal_power_tx_gain_9271;
struct ar5416IniArray ini_radio_post_sys2ant;
struct ar5416IniArray ini_BTCOEX_MAX_TXPWR;
@@ -1082,7 +919,7 @@ const char *ath9k_hw_probe(u16 vendorid, u16 devid);
void ath9k_hw_deinit(struct ath_hw *ah);
int ath9k_hw_init(struct ath_hw *ah);
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
- struct ath9k_hw_cal_data *caldata, bool bChannelChange);
+ struct ath9k_hw_cal_data *caldata, bool fastcc);
int ath9k_hw_fill_cap_info(struct ath_hw *ah);
u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
@@ -1092,7 +929,6 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio);
void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
u32 ah_signal_type);
void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
-u32 ath9k_hw_getdefantenna(struct ath_hw *ah);
void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
/* General Operation */
@@ -1146,9 +982,6 @@ void ath_gen_timer_isr(struct ath_hw *hw);
void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
-/* HTC */
-void ath9k_hw_htc_resetinit(struct ath_hw *ah);
-
/* PHY */
void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
u32 *coef_mantissa, u32 *coef_exponent);
@@ -1158,7 +991,6 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan);
* Code Specific to AR5008, AR9001 or AR9002,
* we stuff these here to avoid callbacks for AR9003.
*/
-void ar9002_hw_cck_chan14_spread(struct ath_hw *ah);
int ar9002_hw_rf_claim(struct ath_hw *ah);
void ar9002_hw_enable_async_fifo(struct ath_hw *ah);
@@ -1205,41 +1037,31 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning);
void ath9k_hw_proc_mib_event(struct ath_hw *ah);
void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
-bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
- u32 *payload, u8 len, bool wait_done,
- bool check_bt);
-void ar9003_mci_mute_bt(struct ath_hw *ah);
-u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data);
-void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
- u16 len, u32 sched_addr);
-void ar9003_mci_cleanup(struct ath_hw *ah);
-void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
- bool wait_done);
-u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
- u8 gpm_opcode, int time_out);
-void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g);
-void ar9003_mci_disable_interrupt(struct ath_hw *ah);
-void ar9003_mci_enable_interrupt(struct ath_hw *ah);
-void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done);
-void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
- bool is_full_sleep);
-bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints);
-void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done);
-void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done);
-void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done);
-void ar9003_mci_sync_bt_state(struct ath_hw *ah);
-void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
- u32 *rx_msg_intr);
-
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
+{
+ return ah->btcoex_hw.enabled;
+}
+void ath9k_hw_btcoex_enable(struct ath_hw *ah);
static inline enum ath_btcoex_scheme
ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
{
return ah->btcoex_hw.scheme;
}
#else
-#define ath9k_hw_get_btcoex_scheme(...) ATH_BTCOEX_CFG_NONE
-#endif
+static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
+{
+ return false;
+}
+static inline void ath9k_hw_btcoex_enable(struct ath_hw *ah)
+{
+}
+static inline enum ath_btcoex_scheme
+ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
+{
+ return ATH_BTCOEX_CFG_NONE;
+}
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
#define ATH9K_CLOCK_RATE_CCK 22
#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 53a005d288aa..60159f4ee532 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -172,7 +172,7 @@ static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
- if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
unsigned long flags;
spin_lock_irqsave(&sc->sc_serial_rw, flags);
iowrite32(val, sc->mem + reg_offset);
@@ -188,7 +188,7 @@ static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
struct ath_softc *sc = (struct ath_softc *) common->priv;
u32 val;
- if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
unsigned long flags;
spin_lock_irqsave(&sc->sc_serial_rw, flags);
val = ioread32(sc->mem + reg_offset);
@@ -219,7 +219,7 @@ static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 cl
unsigned long uninitialized_var(flags);
u32 val;
- if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
spin_lock_irqsave(&sc->sc_serial_rw, flags);
val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
@@ -419,66 +419,6 @@ fail:
return error;
}
-static int ath9k_init_btcoex(struct ath_softc *sc)
-{
- struct ath_txq *txq;
- struct ath_hw *ah = sc->sc_ah;
- int r;
-
- switch (ath9k_hw_get_btcoex_scheme(sc->sc_ah)) {
- case ATH_BTCOEX_CFG_NONE:
- break;
- case ATH_BTCOEX_CFG_2WIRE:
- ath9k_hw_btcoex_init_2wire(sc->sc_ah);
- break;
- case ATH_BTCOEX_CFG_3WIRE:
- ath9k_hw_btcoex_init_3wire(sc->sc_ah);
- r = ath_init_btcoex_timer(sc);
- if (r)
- return -1;
- txq = sc->tx.txq_map[WME_AC_BE];
- ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
- sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
- break;
- case ATH_BTCOEX_CFG_MCI:
- sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
- sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
- INIT_LIST_HEAD(&sc->btcoex.mci.info);
-
- r = ath_mci_setup(sc);
- if (r)
- return r;
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
- ah->btcoex_hw.mci.ready = false;
- ah->btcoex_hw.mci.bt_state = 0;
- ah->btcoex_hw.mci.bt_ver_major = 3;
- ah->btcoex_hw.mci.bt_ver_minor = 0;
- ah->btcoex_hw.mci.bt_version_known = false;
- ah->btcoex_hw.mci.update_2g5g = true;
- ah->btcoex_hw.mci.is_2g = true;
- ah->btcoex_hw.mci.wlan_channels_update = false;
- ah->btcoex_hw.mci.wlan_channels[0] = 0x00000000;
- ah->btcoex_hw.mci.wlan_channels[1] = 0xffffffff;
- ah->btcoex_hw.mci.wlan_channels[2] = 0xffffffff;
- ah->btcoex_hw.mci.wlan_channels[3] = 0x7fffffff;
- ah->btcoex_hw.mci.query_bt = true;
- ah->btcoex_hw.mci.unhalt_bt_gpm = true;
- ah->btcoex_hw.mci.halted_bt_gpm = false;
- ah->btcoex_hw.mci.need_flush_btinfo = false;
- ah->btcoex_hw.mci.wlan_cal_seq = 0;
- ah->btcoex_hw.mci.wlan_cal_done = 0;
- ah->btcoex_hw.mci.config = 0x2201;
- }
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- return 0;
-}
-
static int ath9k_init_queues(struct ath_softc *sc)
{
int i = 0;
@@ -544,19 +484,11 @@ static void ath9k_init_misc(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int i = 0;
+
setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
sc->config.txpowlimit = ATH_TXPOWER_MAX;
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- sc->sc_flags |= SC_OP_TXAGGR;
- sc->sc_flags |= SC_OP_RXAGGR;
- }
-
- sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
-
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
-
sc->beacon.slottime = ATH9K_SLOT_TIME_9;
for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
@@ -615,9 +547,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_DEBUGFS
spin_lock_init(&sc->nodes_lock);
- spin_lock_init(&sc->debug.samp_lock);
INIT_LIST_HEAD(&sc->nodes);
#endif
+#ifdef CONFIG_ATH9K_MAC_DEBUG
+ spin_lock_init(&sc->debug.samp_lock);
+#endif
tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
(unsigned long)sc);
@@ -880,12 +814,7 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
- if ((sc->btcoex.no_stomp_timer) &&
- ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
- ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
-
- if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_MCI)
- ath_mci_cleanup(sc);
+ ath9k_deinit_btcoex(sc);
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i))
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index e196aba77acf..f7bd2532269c 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -185,13 +185,6 @@ bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
}
EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);
-void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
-{
- *txqs &= ah->intr_txqs;
- ah->intr_txqs &= ~(*txqs);
-}
-EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);
-
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
const struct ath9k_tx_queue_info *qinfo)
{
@@ -340,6 +333,15 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
+static void ath9k_hw_clear_queue_interrupts(struct ath_hw *ah, u32 q)
+{
+ ah->txok_interrupt_mask &= ~(1 << q);
+ ah->txerr_interrupt_mask &= ~(1 << q);
+ ah->txdesc_interrupt_mask &= ~(1 << q);
+ ah->txeol_interrupt_mask &= ~(1 << q);
+ ah->txurn_interrupt_mask &= ~(1 << q);
+}
+
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -354,11 +356,7 @@ bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
ath_dbg(common, QUEUE, "Release TX queue: %u\n", q);
qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
- ah->txok_interrupt_mask &= ~(1 << q);
- ah->txerr_interrupt_mask &= ~(1 << q);
- ah->txdesc_interrupt_mask &= ~(1 << q);
- ah->txeol_interrupt_mask &= ~(1 << q);
- ah->txurn_interrupt_mask &= ~(1 << q);
+ ath9k_hw_clear_queue_interrupts(ah, q);
ath9k_hw_set_txq_interrupts(ah, qi);
return true;
@@ -510,26 +508,17 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
if (AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);
- if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
+ ath9k_hw_clear_queue_interrupts(ah, q);
+ if (qi->tqi_qflags & TXQ_FLAG_TXINT_ENABLE) {
ah->txok_interrupt_mask |= 1 << q;
- else
- ah->txok_interrupt_mask &= ~(1 << q);
- if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
ah->txerr_interrupt_mask |= 1 << q;
- else
- ah->txerr_interrupt_mask &= ~(1 << q);
+ }
if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
ah->txdesc_interrupt_mask |= 1 << q;
- else
- ah->txdesc_interrupt_mask &= ~(1 << q);
if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
ah->txeol_interrupt_mask |= 1 << q;
- else
- ah->txeol_interrupt_mask &= ~(1 << q);
if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
ah->txurn_interrupt_mask |= 1 << q;
- else
- ah->txurn_interrupt_mask &= ~(1 << q);
ath9k_hw_set_txq_interrupts(ah, qi);
return true;
@@ -745,7 +734,10 @@ int ath9k_hw_beaconq_setup(struct ath_hw *ah)
qi.tqi_aifs = 1;
qi.tqi_cwmin = 0;
qi.tqi_cwmax = 0;
- /* NB: don't enable any interrupts */
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
+
return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 11dbd1473a13..21c955609e6c 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -583,8 +583,7 @@ enum ath9k_tx_queue {
#define ATH9K_WME_UPSD 4
enum ath9k_tx_queue_flags {
- TXQ_FLAG_TXOKINT_ENABLE = 0x0001,
- TXQ_FLAG_TXERRINT_ENABLE = 0x0001,
+ TXQ_FLAG_TXINT_ENABLE = 0x0001,
TXQ_FLAG_TXDESCINT_ENABLE = 0x0002,
TXQ_FLAG_TXEOLINT_ENABLE = 0x0004,
TXQ_FLAG_TXURNINT_ENABLE = 0x0008,
@@ -714,7 +713,6 @@ u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q);
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel);
bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q);
void ath9k_hw_abort_tx_dma(struct ath_hw *ah);
-void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs);
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
const struct ath9k_tx_queue_info *qinfo);
bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 4a00806e2852..38794850f005 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -118,13 +118,15 @@ void ath9k_ps_restore(struct ath_softc *sc)
if (--sc->ps_usecount != 0)
goto unlock;
- if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK))
+ if (sc->ps_flags & PS_WAIT_FOR_TX_ACK)
+ goto unlock;
+
+ if (sc->ps_idle)
mode = ATH9K_PM_FULL_SLEEP;
else if (sc->ps_enabled &&
!(sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
- PS_WAIT_FOR_PSPOLL_DATA |
- PS_WAIT_FOR_TX_ACK)))
+ PS_WAIT_FOR_PSPOLL_DATA)))
mode = ATH9K_PM_NETWORK_SLEEP;
else
goto unlock;
@@ -332,17 +334,11 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
hchan = ah->curchan;
}
- if (fastcc && (ah->chip_fullsleep ||
- !ath9k_hw_check_alive(ah)))
- fastcc = false;
-
if (!ath_prepare_reset(sc, retry_tx, flush))
fastcc = false;
ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
- hchan->channel, !!(hchan->channelFlags & (CHANNEL_HT40MINUS |
- CHANNEL_HT40PLUS)),
- fastcc);
+ hchan->channel, IS_CHAN_HT40(hchan), fastcc);
r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
if (r) {
@@ -373,12 +369,8 @@ static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
if (sc->sc_flags & SC_OP_INVALID)
return -EIO;
- ath9k_ps_wakeup(sc);
-
r = ath_reset_internal(sc, hchan, false);
- ath9k_ps_restore(sc);
-
return r;
}
@@ -647,7 +639,8 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
#endif
an->sta = sta;
an->vif = vif;
- if (sc->sc_flags & SC_OP_TXAGGR) {
+
+ if (sta->ht_cap.ht_supported) {
ath_tx_node_init(sc, an);
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
sta->ht_cap.ampdu_factor);
@@ -666,7 +659,7 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
an->sta = NULL;
#endif
- if (sc->sc_flags & SC_OP_TXAGGR)
+ if (sta->ht_cap.ht_supported)
ath_tx_node_cleanup(sc, an);
}
@@ -741,12 +734,7 @@ void ath9k_tasklet(unsigned long data)
ath_tx_tasklet(sc);
}
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
- if (status & ATH9K_INT_GENTIMER)
- ath_gen_timer_isr(sc->sc_ah);
-
- if ((status & ATH9K_INT_MCI) && ATH9K_HW_CAP_MCI)
- ath_mci_intr(sc);
+ ath9k_btcoex_handle_interrupt(sc, status);
out:
/* re-enable hardware interrupt */
@@ -1004,12 +992,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
curchan->center_freq);
ath9k_ps_wakeup(sc);
-
mutex_lock(&sc->mutex);
- /* setup initial channel */
- sc->chan_idx = curchan->hw_value;
-
init_channel = ath9k_cmn_get_curchannel(hw, ah);
/* Reset SERDES registers */
@@ -1058,9 +1042,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
sc->sc_flags &= ~SC_OP_INVALID;
sc->sc_ah->is_monitoring = false;
- /* Disable BMISS interrupt when we're not associated */
- ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
-
if (!ath_complete_reset(sc, false)) {
r = -EIO;
spin_unlock_bh(&sc->sc_pcu_lock);
@@ -1081,16 +1062,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
spin_unlock_bh(&sc->sc_pcu_lock);
- if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
- !ah->btcoex_hw.enabled) {
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_LOW_WLAN_WGHT);
- ath9k_hw_btcoex_enable(ah);
-
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
- ath9k_btcoex_timer_resume(sc);
- }
+ ath9k_start_btcoex(sc);
if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
common->bus_ops->extn_synch_en(common);
@@ -1191,13 +1163,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
/* Ensure HW is awake when we try to shut it down. */
ath9k_ps_wakeup(sc);
- if (ah->btcoex_hw.enabled &&
- ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
- ath9k_hw_btcoex_disable(ah);
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
- ath9k_btcoex_timer_pause(sc);
- ath_mci_flush_profile(&sc->btcoex.mci);
- }
+ ath9k_stop_btcoex(sc);
spin_lock_bh(&sc->sc_pcu_lock);
@@ -1303,7 +1269,6 @@ static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
iter_data->nwds++;
break;
default:
- iter_data->nothers++;
break;
}
}
@@ -1589,12 +1554,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
- /*
- * Leave this as the first check because we need to turn on the
- * radio if it was disabled before prior to processing the rest
- * of the changes. Likewise we must only disable the radio towards
- * the end.
- */
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
if (sc->ps_idle)
@@ -1793,7 +1752,7 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_node *an = (struct ath_node *) sta->drv_priv;
- if (!(sc->sc_flags & SC_OP_TXAGGR))
+ if (!sta->ht_cap.ht_supported)
return;
switch (cmd) {
@@ -2005,7 +1964,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
- if (changed & BSS_CHANGED_BSSID) {
+ if (changed & BSS_CHANGED_ASSOC) {
ath9k_config_bss(sc, vif);
ath_dbg(common, CONFIG, "BSSID: %pM aid: 0x%x\n",
@@ -2085,25 +2044,6 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
ath_beacon_config(sc, vif);
}
- if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- ath_dbg(common, CONFIG, "BSS Changed PREAMBLE %d\n",
- bss_conf->use_short_preamble);
- if (bss_conf->use_short_preamble)
- sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
- else
- sc->sc_flags &= ~SC_OP_PREAMBLE_SHORT;
- }
-
- if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- ath_dbg(common, CONFIG, "BSS Changed CTS PROT %d\n",
- bss_conf->use_cts_prot);
- if (bss_conf->use_cts_prot &&
- hw->conf.channel->band != IEEE80211_BAND_5GHZ)
- sc->sc_flags |= SC_OP_PROTECT_ENABLE;
- else
- sc->sc_flags &= ~SC_OP_PROTECT_ENABLE;
- }
-
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
}
@@ -2161,15 +2101,10 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if (!(sc->sc_flags & SC_OP_RXAGGR))
- ret = -ENOTSUPP;
break;
case IEEE80211_AMPDU_RX_STOP:
break;
case IEEE80211_AMPDU_TX_START:
- if (!(sc->sc_flags & SC_OP_TXAGGR))
- return -EOPNOTSUPP;
-
ath9k_ps_wakeup(sc);
ret = ath_tx_aggr_start(sc, sta, tid, ssn);
if (!ret)
@@ -2332,6 +2267,7 @@ static int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
struct ath_vif *avp;
struct ath_buf *bf;
struct ath_tx_status ts;
+ bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int status;
vif = sc->beacon.bslot[0];
@@ -2342,7 +2278,7 @@ static int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
if (!avp->is_bslot_active)
return 0;
- if (!sc->beacon.tx_processed) {
+ if (!sc->beacon.tx_processed && !edma) {
tasklet_disable(&sc->bcon_tasklet);
bf = avp->av_bcbuf;
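The two ath9k main.c hunks above fold the open-coded BT-coexistence bring-up and teardown in ath9k_start()/ath9k_stop() into ath9k_start_btcoex()/ath9k_stop_btcoex(), which are added elsewhere in this series. Going only by the removed lines, the start-side helper plausibly reads like the sketch below (a reconstruction for orientation, not the committed body):

static void ath9k_start_btcoex(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;

	/* Nothing to do if coex is unconfigured or already running. */
	if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE ||
	    ah->btcoex_hw.enabled)
		return;

	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
		ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
					   AR_STOMP_LOW_WLAN_WGHT);
	ath9k_hw_btcoex_enable(ah);

	/* 3-wire coex also needs the no-stomp timer running. */
	if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
		ath9k_btcoex_timer_resume(sc);
}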
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 05c23ea4c633..29fe52d69973 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -42,24 +42,18 @@ static bool ath_mci_add_profile(struct ath_common *common,
struct ath_mci_profile_info *entry;
if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
- (info->type == MCI_GPM_COEX_PROFILE_VOICE)) {
- ath_dbg(common, MCI,
- "Too many SCO profile, failed to add new profile\n");
+ (info->type == MCI_GPM_COEX_PROFILE_VOICE))
return false;
- }
if (((NUM_PROF(mci) - mci->num_sco) == ATH_MCI_MAX_ACL_PROFILE) &&
- (info->type != MCI_GPM_COEX_PROFILE_VOICE)) {
- ath_dbg(common, MCI,
- "Too many ACL profile, failed to add new profile\n");
+ (info->type != MCI_GPM_COEX_PROFILE_VOICE))
return false;
- }
entry = ath_mci_find_profile(mci, info);
- if (entry)
+ if (entry) {
memcpy(entry, info, 10);
- else {
+ } else {
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return false;
@@ -68,6 +62,7 @@ static bool ath_mci_add_profile(struct ath_common *common,
INC_PROF(mci, info);
list_add_tail(&info->list, &mci->info);
}
+
return true;
}
@@ -79,10 +74,9 @@ static void ath_mci_del_profile(struct ath_common *common,
entry = ath_mci_find_profile(mci, info);
- if (!entry) {
- ath_dbg(common, MCI, "Profile to be deleted not found\n");
+ if (!entry)
return;
- }
+
DEC_PROF(mci, entry);
list_del(&entry->list);
kfree(entry);
@@ -177,13 +171,12 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
btcoex->btcoex_period *= 1000;
btcoex->btcoex_no_stomp = btcoex->btcoex_period *
- (100 - btcoex->duty_cycle) / 100;
+ (100 - btcoex->duty_cycle) / 100;
ath9k_hw_btcoex_enable(sc->sc_ah);
ath9k_btcoex_timer_resume(sc);
}
-
static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
{
struct ath_hw *ah = sc->sc_ah;
@@ -192,42 +185,24 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
switch (opcode) {
case MCI_GPM_BT_CAL_REQ:
-
- ath_dbg(common, MCI, "MCI received BT_CAL_REQ\n");
-
if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL);
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
- } else
- ath_dbg(common, MCI, "MCI State mismatches: %d\n",
+ } else {
+ ath_dbg(common, MCI, "MCI State mismatch: %d\n",
ar9003_mci_state(ah, MCI_STATE_BT, NULL));
-
+ }
break;
-
case MCI_GPM_BT_CAL_DONE:
-
- ath_dbg(common, MCI, "MCI received BT_CAL_DONE\n");
-
- if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_CAL)
- ath_dbg(common, MCI, "MCI error illegal!\n");
- else
- ath_dbg(common, MCI, "MCI BT not in CAL state\n");
-
+ ar9003_mci_state(ah, MCI_STATE_BT, NULL);
break;
-
case MCI_GPM_BT_CAL_GRANT:
-
- ath_dbg(common, MCI, "MCI received BT_CAL_GRANT\n");
-
- /* Send WLAN_CAL_DONE for now */
- ath_dbg(common, MCI, "MCI send WLAN_CAL_DONE\n");
MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload,
16, false, true);
break;
-
default:
- ath_dbg(common, MCI, "MCI Unknown GPM CAL message\n");
+ ath_dbg(common, MCI, "Unknown GPM CAL message\n");
break;
}
}
@@ -247,6 +222,7 @@ static void ath_mci_process_profile(struct ath_softc *sc,
btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
mci->aggr_limit = mci->num_sco ? 6 : 0;
+
if (NUM_PROF(mci)) {
btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
@@ -262,31 +238,24 @@ static void ath_mci_process_profile(struct ath_softc *sc,
static void ath_mci_process_status(struct ath_softc *sc,
struct ath_mci_profile_status *status)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_mci_profile *mci = &btcoex->mci;
struct ath_mci_profile_info info;
int i = 0, old_num_mgmt = mci->num_mgmt;
/* Link status type are not handled */
- if (status->is_link) {
- ath_dbg(common, MCI, "Skip link type status update\n");
+ if (status->is_link)
return;
- }
memset(&info, 0, sizeof(struct ath_mci_profile_info));
info.conn_handle = status->conn_handle;
- if (ath_mci_find_profile(mci, &info)) {
- ath_dbg(common, MCI,
- "Skip non link state update for existing profile %d\n",
- status->conn_handle);
+ if (ath_mci_find_profile(mci, &info))
return;
- }
- if (status->conn_handle >= ATH_MCI_MAX_PROFILE) {
- ath_dbg(common, MCI, "Ignore too many non-link update\n");
+
+ if (status->conn_handle >= ATH_MCI_MAX_PROFILE)
return;
- }
+
if (status->is_critical)
__set_bit(status->conn_handle, mci->status);
else
@@ -314,43 +283,28 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
u32 seq_num;
switch (opcode) {
-
case MCI_GPM_COEX_VERSION_QUERY:
- ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n");
- version = ar9003_mci_state(ah,
- MCI_STATE_SEND_WLAN_COEX_VERSION, NULL);
+ version = ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION,
+ NULL);
break;
-
case MCI_GPM_COEX_VERSION_RESPONSE:
- ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n");
major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
- ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n",
- major, minor);
version = (major << 8) + minor;
- version = ar9003_mci_state(ah,
- MCI_STATE_SET_BT_COEX_VERSION, &version);
+ version = ar9003_mci_state(ah, MCI_STATE_SET_BT_COEX_VERSION,
+ &version);
break;
-
case MCI_GPM_COEX_STATUS_QUERY:
- ath_dbg(common, MCI,
- "MCI Recv GPM COEX Status Query = 0x%02x\n",
- *(rx_payload + MCI_GPM_COEX_B_WLAN_BITMAP));
- ar9003_mci_state(ah,
- MCI_STATE_SEND_WLAN_CHANNELS, NULL);
+ ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_CHANNELS, NULL);
break;
-
case MCI_GPM_COEX_BT_PROFILE_INFO:
- ath_dbg(common, MCI, "MCI Recv GPM Coex BT profile info\n");
memcpy(&profile_info,
(rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10);
- if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN)
- || (profile_info.type >=
- MCI_GPM_COEX_PROFILE_MAX)) {
-
+ if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN) ||
+ (profile_info.type >= MCI_GPM_COEX_PROFILE_MAX)) {
ath_dbg(common, MCI,
- "illegal profile type = %d, state = %d\n",
+ "Illegal profile type = %d, state = %d\n",
profile_info.type,
profile_info.start);
break;
@@ -358,7 +312,6 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
ath_mci_process_profile(sc, &profile_info);
break;
-
case MCI_GPM_COEX_BT_STATUS_UPDATE:
profile_status.is_link = *(rx_payload +
MCI_GPM_COEX_B_STATUS_TYPE);
@@ -369,98 +322,66 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
seq_num = *((u32 *)(rx_payload + 12));
ath_dbg(common, MCI,
- "MCI Recv GPM COEX BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%d\n",
+ "BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%d\n",
profile_status.is_link, profile_status.conn_handle,
profile_status.is_critical, seq_num);
ath_mci_process_status(sc, &profile_status);
break;
-
default:
- ath_dbg(common, MCI, "MCI Unknown GPM COEX message = 0x%02x\n",
- opcode);
+ ath_dbg(common, MCI, "Unknown GPM COEX message = 0x%02x\n", opcode);
break;
}
}
-static int ath_mci_buf_alloc(struct ath_softc *sc, struct ath_mci_buf *buf)
-{
- int error = 0;
-
- buf->bf_addr = dma_alloc_coherent(sc->dev, buf->bf_len,
- &buf->bf_paddr, GFP_KERNEL);
-
- if (buf->bf_addr == NULL) {
- error = -ENOMEM;
- goto fail;
- }
-
- return 0;
-
-fail:
- memset(buf, 0, sizeof(*buf));
- return error;
-}
-
-static void ath_mci_buf_free(struct ath_softc *sc, struct ath_mci_buf *buf)
-{
- if (buf->bf_addr) {
- dma_free_coherent(sc->dev, buf->bf_len, buf->bf_addr,
- buf->bf_paddr);
- memset(buf, 0, sizeof(*buf));
- }
-}
-
int ath_mci_setup(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_mci_coex *mci = &sc->mci_coex;
- int error = 0;
-
- if (!ATH9K_HW_CAP_MCI)
- return 0;
+ struct ath_mci_buf *buf = &mci->sched_buf;
- mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE;
+ buf->bf_addr = dma_alloc_coherent(sc->dev,
+ ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
+ &buf->bf_paddr, GFP_KERNEL);
- if (ath_mci_buf_alloc(sc, &mci->sched_buf)) {
+ if (buf->bf_addr == NULL) {
ath_dbg(common, FATAL, "MCI buffer alloc failed\n");
- error = -ENOMEM;
- goto fail;
+ return -ENOMEM;
}
- mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;
+ memset(buf->bf_addr, MCI_GPM_RSVD_PATTERN,
+ ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE);
- memset(mci->sched_buf.bf_addr, MCI_GPM_RSVD_PATTERN,
- mci->sched_buf.bf_len);
+ mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;
mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
- mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr +
- mci->sched_buf.bf_len;
+ mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr + mci->sched_buf.bf_len;
mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
- /* initialize the buffer */
- memset(mci->gpm_buf.bf_addr, MCI_GPM_RSVD_PATTERN, mci->gpm_buf.bf_len);
-
ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
mci->sched_buf.bf_paddr);
-fail:
- return error;
+
+ ath_dbg(common, MCI, "MCI Initialized\n");
+
+ return 0;
}
void ath_mci_cleanup(struct ath_softc *sc)
{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_hw *ah = sc->sc_ah;
struct ath_mci_coex *mci = &sc->mci_coex;
+ struct ath_mci_buf *buf = &mci->sched_buf;
- if (!ATH9K_HW_CAP_MCI)
- return;
+ if (buf->bf_addr)
+ dma_free_coherent(sc->dev,
+ ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
+ buf->bf_addr, buf->bf_paddr);
- /*
- * both schedule and gpm buffers will be released
- */
- ath_mci_buf_free(sc, &mci->sched_buf);
ar9003_mci_cleanup(ah);
+
+ ath_dbg(common, MCI, "MCI De-Initialized\n");
}
void ath_mci_intr(struct ath_softc *sc)
@@ -474,19 +395,10 @@ void ath_mci_intr(struct ath_softc *sc)
u32 more_data = MCI_GPM_MORE;
bool skip_gpm = false;
- if (!ATH9K_HW_CAP_MCI)
- return;
-
ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) {
-
- ar9003_mci_state(sc->sc_ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
- ath_dbg(common, MCI, "MCI interrupt but MCI disabled\n");
-
- ath_dbg(common, MCI,
- "MCI interrupt: intr = 0x%x, intr_rxmsg = 0x%x\n",
- mci_int, mci_int_rxmsg);
+ ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
return;
}
@@ -499,11 +411,8 @@ void ath_mci_intr(struct ath_softc *sc)
* only when BT wake up. Now they are always sent, as a
* recovery method to reset BT MCI's RX alignment.
*/
- ath_dbg(common, MCI, "MCI interrupt send REMOTE_RESET\n");
-
ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0,
payload, 16, true, false);
- ath_dbg(common, MCI, "MCI interrupt send SYS_WAKING\n");
ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0,
NULL, 0, true, false);
@@ -513,74 +422,51 @@ void ath_mci_intr(struct ath_softc *sc)
/*
* always do this for recovery and 2G/5G toggling and LNA_TRANS
*/
- ath_dbg(common, MCI, "MCI Set BT state to AWAKE\n");
ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL);
}
- /* Processing SYS_WAKING/SYS_SLEEPING */
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) {
-
- if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
- == MCI_BT_SLEEP)
- ath_dbg(common, MCI,
- "MCI BT stays in sleep mode\n");
- else {
- ath_dbg(common, MCI,
- "MCI Set BT state to AWAKE\n");
- ar9003_mci_state(ah,
- MCI_STATE_SET_BT_AWAKE, NULL);
- }
- } else
- ath_dbg(common, MCI, "MCI BT stays in AWAKE mode\n");
+ if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) !=
+ MCI_BT_SLEEP)
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE,
+ NULL);
+ }
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
-
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
-
- if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
- == MCI_BT_AWAKE)
- ath_dbg(common, MCI,
- "MCI BT stays in AWAKE mode\n");
- else {
- ath_dbg(common, MCI,
- "MCI SetBT state to SLEEP\n");
+ if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) !=
+ MCI_BT_AWAKE)
ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP,
NULL);
- }
- } else
- ath_dbg(common, MCI, "MCI BT stays in SLEEP mode\n");
+ }
}
if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
(mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
-
- ath_dbg(common, MCI, "MCI RX broken, skip GPM msgs\n");
ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL);
skip_gpm = true;
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
-
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET,
NULL);
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
-
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
while (more_data == MCI_GPM_MORE) {
pgpm = mci->gpm_buf.bf_addr;
- offset = ar9003_mci_state(ah,
- MCI_STATE_NEXT_GPM_OFFSET, &more_data);
+ offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
+ &more_data);
if (offset == MCI_GPM_INVALID)
break;
@@ -591,44 +477,38 @@ void ath_mci_intr(struct ath_softc *sc)
* The first dword is timer.
* The real data starts from 2nd dword.
*/
-
subtype = MCI_GPM_TYPE(pgpm);
opcode = MCI_GPM_OPCODE(pgpm);
- if (!skip_gpm) {
-
- if (MCI_GPM_IS_CAL_TYPE(subtype))
- ath_mci_cal_msg(sc, subtype,
- (u8 *) pgpm);
- else {
- switch (subtype) {
- case MCI_GPM_COEX_AGENT:
- ath_mci_msg(sc, opcode,
- (u8 *) pgpm);
- break;
- default:
- break;
- }
+ if (skip_gpm)
+ goto recycle;
+
+ if (MCI_GPM_IS_CAL_TYPE(subtype)) {
+ ath_mci_cal_msg(sc, subtype, (u8 *)pgpm);
+ } else {
+ switch (subtype) {
+ case MCI_GPM_COEX_AGENT:
+ ath_mci_msg(sc, opcode, (u8 *)pgpm);
+ break;
+ default:
+ break;
}
}
+ recycle:
MCI_GPM_RECYCLE(pgpm);
}
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) {
-
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL)
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL;
- if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO) {
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO)
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
- ath_dbg(common, MCI, "MCI LNA_INFO\n");
- }
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
-
int value_dbm = ar9003_mci_state(ah,
- MCI_STATE_CONT_RSSI_POWER, NULL);
+ MCI_STATE_CONT_RSSI_POWER, NULL);
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
@@ -636,33 +516,25 @@ void ath_mci_intr(struct ath_softc *sc)
ath_dbg(common, MCI,
"MCI CONT_INFO: (tx) pri = %d, pwr = %d dBm\n",
ar9003_mci_state(ah,
- MCI_STATE_CONT_PRIORITY, NULL),
+ MCI_STATE_CONT_PRIORITY, NULL),
value_dbm);
else
ath_dbg(common, MCI,
"MCI CONT_INFO: (rx) pri = %d,pwr = %d dBm\n",
ar9003_mci_state(ah,
- MCI_STATE_CONT_PRIORITY, NULL),
+ MCI_STATE_CONT_PRIORITY, NULL),
value_dbm);
}
- if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) {
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK)
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK;
- ath_dbg(common, MCI, "MCI CONT_NACK\n");
- }
- if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST) {
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST;
- ath_dbg(common, MCI, "MCI CONT_RST\n");
- }
}
if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
(mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT))
mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
-
- if (mci_int_rxmsg & 0xfffffffe)
- ath_dbg(common, MCI, "MCI not processed mci_int_rxmsg = 0x%x\n",
- mci_int_rxmsg);
}
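The mci.c rework above drops ath_mci_buf_alloc()/ath_mci_buf_free() in favour of a single dma_alloc_coherent() call whose tail is carved out as the GPM buffer, so one allocation (and one free in ath_mci_cleanup()) covers both. The stand-alone snippet below (plain user-space C with invented names and a fake "physical" address) only illustrates that carve-out arithmetic:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define SCHED_BUF_SIZE (16 * 16)   /* mirrors ATH_MCI_SCHED_BUF_SIZE */
#define GPM_BUF_SIZE   (16 * 16)   /* mirrors ATH_MCI_GPM_BUF_SIZE   */

struct mci_buf {
	void      *addr;   /* virtual address of the sub-buffer */
	uintptr_t  paddr;  /* stand-in for the DMA address      */
	uint32_t   len;
};

static int mci_bufs_setup(struct mci_buf *sched, struct mci_buf *gpm)
{
	/* One backing allocation, analogous to the dma_alloc_coherent() call. */
	uint8_t *base = malloc(SCHED_BUF_SIZE + GPM_BUF_SIZE);

	if (!base)
		return -1;
	memset(base, 0xee, SCHED_BUF_SIZE + GPM_BUF_SIZE);

	sched->addr  = base;
	sched->paddr = (uintptr_t)base;        /* pretend virt == phys here */
	sched->len   = SCHED_BUF_SIZE;

	/* The GPM buffer is simply an offset into the same allocation, so a
	 * single free (dma_free_coherent() in the driver) releases both. */
	gpm->addr  = (uint8_t *)sched->addr + sched->len;
	gpm->paddr = sched->paddr + sched->len;
	gpm->len   = GPM_BUF_SIZE;

	return 0;
}

int main(void)
{
	struct mci_buf sched, gpm;

	if (mci_bufs_setup(&sched, &gpm))
		return 1;
	free(sched.addr);   /* frees both sub-buffers at once */
	return 0;
}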
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
index 29e3e51d078f..c841444f53c2 100644
--- a/drivers/net/wireless/ath/ath9k/mci.h
+++ b/drivers/net/wireless/ath/ath9k/mci.h
@@ -17,6 +17,8 @@
#ifndef MCI_H
#define MCI_H
+#include "ar9003_mci.h"
+
#define ATH_MCI_SCHED_BUF_SIZE (16 * 16) /* 16 entries, 4 dword each */
#define ATH_MCI_GPM_MAX_ENTRY 16
#define ATH_MCI_GPM_BUF_SIZE (ATH_MCI_GPM_MAX_ENTRY * 16)
@@ -113,7 +115,6 @@ struct ath_mci_profile {
u8 num_bdr;
};
-
struct ath_mci_buf {
void *bf_addr; /* virtual addr of desc */
dma_addr_t bf_paddr; /* physical addr of buffer */
@@ -121,10 +122,8 @@ struct ath_mci_buf {
};
struct ath_mci_coex {
- atomic_t mci_cal_flag;
struct ath_mci_buf sched_buf;
struct ath_mci_buf gpm_buf;
- u32 bt_cal_start;
};
void ath_mci_flush_profile(struct ath_mci_profile *mci);
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index a427a16bb739..4f848493fece 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -567,10 +567,8 @@ static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
const struct ath_rate_table *rate_table,
- u8 *mcs_set, u32 capflag)
+ struct ath_rateset *rateset, u32 capflag)
{
- struct ath_rateset *rateset = (struct ath_rateset *)mcs_set;
-
u8 i, j, hi = 0;
/* Use intersection of working rates and valid rates */
@@ -750,7 +748,8 @@ static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
* If 802.11g protection is enabled, determine whether to use RTS/CTS or
* just CTS. Note that this is only done for OFDM/HT unicast frames.
*/
- if ((sc->sc_flags & SC_OP_PROTECT_ENABLE) &&
+ if ((tx_info->control.vif &&
+ tx_info->control.vif->bss_conf.use_cts_prot) &&
(rate_table->info[rix].phy == WLAN_RC_PHY_OFDM ||
WLAN_RC_PHY_HT(rate_table->info[rix].phy))) {
rates[0].flags |= IEEE80211_TX_RC_USE_CTS_PROTECT;
@@ -1212,7 +1211,7 @@ static void ath_rc_init(struct ath_softc *sc,
{
struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- u8 *ht_mcs = (u8 *)&ath_rc_priv->neg_ht_rates;
+ struct ath_rateset *ht_mcs = &ath_rc_priv->neg_ht_rates;
u8 i, j, k, hi = 0, hthi = 0;
/* Initial rate table size. Will change depending
@@ -1228,7 +1227,7 @@ static void ath_rc_init(struct ath_softc *sc,
ath_rc_init_valid_rate_idx(ath_rc_priv);
for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
- for (j = 0; j < MAX_TX_RATE_PHY; j++)
+ for (j = 0; j < RATE_TABLE_SIZE; j++)
ath_rc_priv->valid_phy_rateidx[i][j] = 0;
ath_rc_priv->valid_phy_ratecnt[i] = 0;
}
@@ -1300,12 +1299,13 @@ static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
return caps;
}
-static bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an,
+static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta,
u8 tidno)
{
+ struct ath_node *an = (struct ath_node *)sta->drv_priv;
struct ath_atx_tid *txtid;
- if (!(sc->sc_flags & SC_OP_TXAGGR))
+ if (!sta->ht_cap.ht_supported)
return false;
txtid = ATH_AN_2_TID(an, tidno);
@@ -1376,13 +1376,11 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
if (ieee80211_is_data_qos(fc) &&
skb_get_queue_mapping(skb) != IEEE80211_AC_VO) {
u8 *qc, tid;
- struct ath_node *an;
qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & 0xf;
- an = (struct ath_node *)sta->drv_priv;
- if(ath_tx_aggr_check(sc, an, tid))
+ if(ath_tx_aggr_check(sc, sta, tid))
+ if (ath_tx_aggr_check(sc, sta, tid))
ieee80211_start_tx_ba_session(sta, tid, 0);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index b7a4bcd3eec7..75f8e9b06b28 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -25,8 +25,6 @@ struct ath_softc;
#define ATH_RATE_MAX 30
#define RATE_TABLE_SIZE 72
-#define MAX_TX_RATE_PHY 48
-
#define RC_INVALID 0x0000
#define RC_LEGACY 0x0001
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 7e1a91af1497..f4ae3ba994a8 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -169,22 +169,17 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
enum ath9k_rx_qtype qtype, int size)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- u32 nbuf = 0;
+ struct ath_buf *bf, *tbf;
if (list_empty(&sc->rx.rxbuf)) {
ath_dbg(common, QUEUE, "No free rx buf available\n");
return;
}
- while (!list_empty(&sc->rx.rxbuf)) {
- nbuf++;
-
+ list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
if (!ath_rx_edma_buf_link(sc, qtype))
break;
- if (nbuf >= size)
- break;
- }
}
static void ath_rx_remove_buffer(struct ath_softc *sc,
@@ -232,7 +227,6 @@ static void ath_rx_edma_cleanup(struct ath_softc *sc)
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
skb_queue_head_init(&rx_edma->rx_fifo);
- skb_queue_head_init(&rx_edma->rx_buffers);
rx_edma->rx_fifo_hwsize = size;
}
@@ -658,7 +652,9 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
}
static bool ath_edma_get_buffers(struct ath_softc *sc,
- enum ath9k_rx_qtype qtype)
+ enum ath9k_rx_qtype qtype,
+ struct ath_rx_status *rs,
+ struct ath_buf **dest)
{
struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
struct ath_hw *ah = sc->sc_ah;
@@ -677,7 +673,7 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
common->rx_bufsize, DMA_FROM_DEVICE);
- ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
+ ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
if (ret == -EINPROGRESS) {
/*let device gain the buffer again*/
dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
@@ -690,20 +686,21 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
/* corrupt descriptor, skip this one and the following one */
list_add_tail(&bf->list, &sc->rx.rxbuf);
ath_rx_edma_buf_link(sc, qtype);
- skb = skb_peek(&rx_edma->rx_fifo);
- if (!skb)
- return true;
- bf = SKB_CB_ATHBUF(skb);
- BUG_ON(!bf);
+ skb = skb_peek(&rx_edma->rx_fifo);
+ if (skb) {
+ bf = SKB_CB_ATHBUF(skb);
+ BUG_ON(!bf);
- __skb_unlink(skb, &rx_edma->rx_fifo);
- list_add_tail(&bf->list, &sc->rx.rxbuf);
- ath_rx_edma_buf_link(sc, qtype);
- return true;
+ __skb_unlink(skb, &rx_edma->rx_fifo);
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_edma_buf_link(sc, qtype);
+ } else {
+ bf = NULL;
+ }
}
- skb_queue_tail(&rx_edma->rx_buffers, skb);
+ *dest = bf;
return true;
}
@@ -711,18 +708,15 @@ static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
struct ath_rx_status *rs,
enum ath9k_rx_qtype qtype)
{
- struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
- struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_buf *bf = NULL;
- while (ath_edma_get_buffers(sc, qtype));
- skb = __skb_dequeue(&rx_edma->rx_buffers);
- if (!skb)
- return NULL;
+ while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
+ if (!bf)
+ continue;
- bf = SKB_CB_ATHBUF(skb);
- ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
- return bf;
+ return bf;
+ }
+ return NULL;
}
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
@@ -954,6 +948,7 @@ static void ath9k_process_rssi(struct ath_common *common,
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = common->ah;
int last_rssi;
+ int rssi = rx_stats->rs_rssi;
if (!rx_stats->is_mybeacon ||
((ah->opmode != NL80211_IFTYPE_STATION) &&
@@ -965,13 +960,12 @@ static void ath9k_process_rssi(struct ath_common *common,
last_rssi = sc->last_rssi;
if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
- rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
- ATH_RSSI_EP_MULTIPLIER);
- if (rx_stats->rs_rssi < 0)
- rx_stats->rs_rssi = 0;
+ rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+ if (rssi < 0)
+ rssi = 0;
/* Update Beacon RSSI, this is used by ANI. */
- ah->stats.avgbrssi = rx_stats->rs_rssi;
+ ah->stats.avgbrssi = rssi;
}
/*
@@ -988,8 +982,6 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
{
struct ath_hw *ah = common->ah;
- memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
-
/*
* everything but the rate is checked here, the rate check is done
* separately to avoid doing two lookups for a rate for each frame.
@@ -1011,6 +1003,8 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
rx_status->signal = ah->noise + rx_stats->rs_rssi;
rx_status->antenna = rx_stats->rs_antenna;
rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+ if (rx_stats->rs_moreaggr)
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
return 0;
}
@@ -1845,6 +1839,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (sc->sc_flags & SC_OP_RXFLUSH)
goto requeue_drop_frag;
+ memset(rxs, 0, sizeof(struct ieee80211_rx_status));
+
rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
if (rs.rs_tstamp > tsf_lower &&
unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
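The EDMA receive rework above removes the intermediate rx_edma->rx_buffers skb queue: ath_edma_get_buffers() now signals "keep polling" through its return value and hands back at most one completed buffer via an output parameter, which the loop in ath_edma_get_next_rx_buf() consumes. A stand-alone model of that control flow (plain C, made-up data; the driver returns the first buffer rather than printing them all):

#include <stdbool.h>
#include <stdio.h>

static int fifo[] = { 3, -1, 7, 4 };   /* -1 models a corrupt descriptor */
static unsigned int head;
static const unsigned int tail = sizeof(fifo) / sizeof(fifo[0]);

static bool get_next(int **dest)
{
	*dest = NULL;
	if (head == tail)
		return false;          /* FIFO drained, stop polling       */
	if (fifo[head] < 0) {
		head++;                /* bad entry: consume and requeue,  */
		return true;           /* but hand nothing back            */
	}
	*dest = &fifo[head++];
	return true;
}

int main(void)
{
	int *item;

	while (get_next(&item)) {
		if (!item)
			continue;      /* mirrors "if (!bf) continue;"     */
		printf("processed %d\n", *item);
	}
	return 0;
}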
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 6e2f18861f5d..458f81b4a7cb 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -797,7 +797,6 @@
#define AR_SREV_VERSION_9580 0x1C0
#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */
#define AR_SREV_VERSION_9462 0x280
-#define AR_SREV_REVISION_9462_10 0
#define AR_SREV_REVISION_9462_20 2
#define AR_SREV_5416(_ah) \
@@ -898,10 +897,6 @@
#define AR_SREV_9462(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462))
-#define AR_SREV_9462_10(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
- ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_10))
-
#define AR_SREV_9462_20(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20))
@@ -1156,6 +1151,7 @@ enum {
#define AR_INTR_PRIO_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4094 : 0x40d4)
#define AR_ENT_OTP 0x40d8
#define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000
+#define AR_ENT_OTP_49GHZ_DISABLE 0x00100000
#define AR_ENT_OTP_MIN_PKT_SIZE_DISABLE 0x00800000
#define AR_CH0_BB_DPLL1 0x16180
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 3182408ffe35..834e6bc45e8b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -647,9 +647,8 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
struct sk_buff *skb;
struct ieee80211_tx_info *tx_info;
struct ieee80211_tx_rate *rates;
- struct ath_mci_profile *mci = &sc->btcoex.mci;
u32 max_4ms_framelen, frmlen;
- u16 aggr_limit, legacy = 0;
+ u16 aggr_limit, bt_aggr_limit, legacy = 0;
int i;
skb = bf->bf_mpdu;
@@ -694,14 +693,14 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
return 0;
- if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
- aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
- else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
- aggr_limit = min((max_4ms_framelen * 3) / 8,
- (u32)ATH_AMPDU_LIMIT_MAX);
- else
- aggr_limit = min(max_4ms_framelen,
- (u32)ATH_AMPDU_LIMIT_MAX);
+ aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);
+
+ /*
+ * Override the default aggregation limit for BTCOEX.
+ */
+ bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
+ if (bt_aggr_limit)
+ aggr_limit = bt_aggr_limit;
/*
* h/w can accept aggregates up to 16 bit lengths (65535).
@@ -956,7 +955,9 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
*/
rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
info->rtscts_rate = rate->hw_value;
- if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
+
+ if (tx_info->control.vif &&
+ tx_info->control.vif->bss_conf.use_short_preamble)
info->rtscts_rate |= rate->hw_value_short;
for (i = 0; i < 4; i++) {
@@ -1291,14 +1292,11 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
an = (struct ath_node *)sta->drv_priv;
- if (sc->sc_flags & SC_OP_TXAGGR) {
- txtid = ATH_AN_2_TID(an, tid);
- txtid->baw_size =
- IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
- txtid->state |= AGGR_ADDBA_COMPLETE;
- txtid->state &= ~AGGR_ADDBA_PROGRESS;
- ath_tx_resume_tid(sc, txtid);
- }
+ txtid = ATH_AN_2_TID(an, tid);
+ txtid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
+ txtid->state |= AGGR_ADDBA_COMPLETE;
+ txtid->state &= ~AGGR_ADDBA_PROGRESS;
+ ath_tx_resume_tid(sc, txtid);
}
/********************/
@@ -1357,8 +1355,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
* based intr on the EOSP frames.
*/
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
- qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
- TXQ_FLAG_TXERRINT_ENABLE;
+ qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
} else {
if (qtype == ATH9K_TX_QUEUE_UAPSD)
qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
@@ -1524,7 +1521,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
/* flush any pending frames if aggregation is enabled */
- if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !retry_tx)
ath_txq_drain_pending_buffers(sc, txq);
ath_txq_unlock_complete(sc, txq);
@@ -1872,7 +1869,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
struct ath_buf *bf;
u8 tidno;
- if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && txctl->an &&
ieee80211_is_data_qos(hdr->frame_control)) {
tidno = ieee80211_get_qos_ctl(hdr)[0] &
IEEE80211_QOS_CTL_TID_MASK;
@@ -2142,7 +2139,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
} else
ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
- if (sc->sc_flags & SC_OP_TXAGGR)
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
ath_txq_schedule(sc, txq);
}
@@ -2167,7 +2164,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
if (list_empty(&txq->axq_q)) {
txq->axq_link = NULL;
- if (sc->sc_flags & SC_OP_TXAGGR)
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
ath_txq_schedule(sc, txq);
break;
}
@@ -2264,10 +2261,9 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
void ath_tx_tasklet(struct ath_softc *sc)
{
+ struct ath_hw *ah = sc->sc_ah;
+ u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
int i;
- u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
-
- ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
@@ -2297,9 +2293,12 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
break;
}
- /* Skip beacon completions */
- if (ts.qid == sc->beacon.beaconq)
+ /* Process beacon completions separately */
+ if (ts.qid == sc->beacon.beaconq) {
+ sc->beacon.tx_processed = true;
+ sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
continue;
+ }
txq = &sc->tx.txq[ts.qid];
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 6cfbb419e2f6..0cea20e3e250 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -559,6 +559,7 @@ int carl9170_set_hwretry_limit(struct ar9170 *ar, const u32 max_retry);
int carl9170_upload_key(struct ar9170 *ar, const u8 id, const u8 *mac,
const u8 ktype, const u8 keyidx, const u8 *keydata, const int keylen);
int carl9170_disable_key(struct ar9170 *ar, const u8 id);
+int carl9170_set_mac_tpc(struct ar9170 *ar, struct ieee80211_channel *channel);
/* RX */
void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
@@ -593,7 +594,6 @@ int carl9170_get_noisefloor(struct ar9170 *ar);
/* FW */
int carl9170_parse_firmware(struct ar9170 *ar);
-int carl9170_fw_fix_eeprom(struct ar9170 *ar);
extern struct ieee80211_rate __carl9170_ratetable[];
extern int modparam_noht;
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 3de61adacd34..cffde8d9a521 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -389,39 +389,6 @@ carl9170_find_fw_desc(struct ar9170 *ar, const __u8 *fw_data, const size_t len)
return (void *)&fw_data[scan - found];
}
-int carl9170_fw_fix_eeprom(struct ar9170 *ar)
-{
- const struct carl9170fw_fix_desc *fix_desc = NULL;
- unsigned int i, n, off;
- u32 *data = (void *)&ar->eeprom;
-
- fix_desc = carl9170_fw_find_desc(ar, FIX_MAGIC,
- sizeof(*fix_desc), CARL9170FW_FIX_DESC_CUR_VER);
-
- if (!fix_desc)
- return 0;
-
- n = (le16_to_cpu(fix_desc->head.length) - sizeof(*fix_desc)) /
- sizeof(struct carl9170fw_fix_entry);
-
- for (i = 0; i < n; i++) {
- off = le32_to_cpu(fix_desc->data[i].address) -
- AR9170_EEPROM_START;
-
- if (off >= sizeof(struct ar9170_eeprom) || (off & 3)) {
- dev_err(&ar->udev->dev, "Skip invalid entry %d\n", i);
- continue;
- }
-
- data[off / sizeof(*data)] &=
- le32_to_cpu(fix_desc->data[i].mask);
- data[off / sizeof(*data)] |=
- le32_to_cpu(fix_desc->data[i].value);
- }
-
- return 0;
-}
-
int carl9170_parse_firmware(struct ar9170 *ar)
{
const struct carl9170fw_desc_head *fw_desc = NULL;
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index dfda91970995..53415bfd8bef 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -485,3 +485,38 @@ int carl9170_disable_key(struct ar9170 *ar, const u8 id)
return carl9170_exec_cmd(ar, CARL9170_CMD_DKEY,
sizeof(key), (u8 *)&key, 0, NULL);
}
+
+int carl9170_set_mac_tpc(struct ar9170 *ar, struct ieee80211_channel *channel)
+{
+ unsigned int power, chains;
+
+ if (ar->eeprom.tx_mask != 1)
+ chains = AR9170_TX_PHY_TXCHAIN_2;
+ else
+ chains = AR9170_TX_PHY_TXCHAIN_1;
+
+ switch (channel->band) {
+ case IEEE80211_BAND_2GHZ:
+ power = ar->power_2G_ofdm[0] & 0x3f;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ power = ar->power_5G_leg[0] & 0x3f;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ power = min_t(unsigned int, power, ar->hw->conf.power_level * 2);
+
+ carl9170_regwrite_begin(ar);
+ carl9170_regwrite(AR9170_MAC_REG_ACK_TPC,
+ 0x3c1e | power << 20 | chains << 26);
+ carl9170_regwrite(AR9170_MAC_REG_RTS_CTS_TPC,
+ power << 5 | chains << 11 |
+ power << 21 | chains << 27);
+ carl9170_regwrite(AR9170_MAC_REG_CFEND_QOSNULL_TPC,
+ power << 5 | chains << 11 |
+ power << 21 | chains << 27);
+ carl9170_regwrite_finish();
+ return carl9170_regwrite_result();
+}
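carl9170_set_mac_tpc() above packs the 6-bit per-band ACK power and the chain selection into the ACK, RTS/CTS and CF-END/QoS-NULL TPC registers with fixed shifts; the min_t() clamp against hw->conf.power_level * 2 suggests the power fields are in half-dBm steps. The fragment below recomputes the same packing with made-up values (not taken from any EEPROM) purely to make the register layout easier to read:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical example values, assuming half-dBm power units. */
	unsigned int power  = 34 & 0x3f;   /* e.g. 17 dBm, 6-bit field     */
	unsigned int chains = 0x5;         /* stand-in for a 2-chain value */

	uint32_t ack_tpc     = 0x3c1e | power << 20 | chains << 26;
	uint32_t rts_cts_tpc = power << 5 | chains << 11 |
			       power << 21 | chains << 27;

	printf("ACK_TPC     = 0x%08x\n", ack_tpc);
	printf("RTS_CTS_TPC = 0x%08x\n", rts_cts_tpc);
	return 0;
}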
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index db774212161b..8d2523b3f722 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -853,11 +853,6 @@ static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
goto out;
}
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- /* TODO */
- err = 0;
- }
-
if (changed & IEEE80211_CONF_CHANGE_SMPS) {
/* TODO */
err = 0;
@@ -891,6 +886,12 @@ static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
goto out;
}
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ err = carl9170_set_mac_tpc(ar, ar->hw->conf.channel);
+ if (err)
+ goto out;
+ }
+
out:
mutex_unlock(&ar->mutex);
return err;
@@ -1796,6 +1797,9 @@ void *carl9170_alloc(size_t priv_size)
ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /* As IBSS Encryption is software-based, IBSS RSN is supported. */
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
return ar;
err_nomem:
@@ -1931,10 +1935,6 @@ int carl9170_register(struct ar9170 *ar)
if (err)
return err;
- err = carl9170_fw_fix_eeprom(ar);
- if (err)
- return err;
-
err = carl9170_parse_eeprom(ar);
if (err)
return err;
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
index 472efc7e3402..b72c09cf43a4 100644
--- a/drivers/net/wireless/ath/carl9170/phy.c
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -1426,15 +1426,15 @@ static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw)
#undef EDGES
}
-static int carl9170_set_power_cal(struct ar9170 *ar, u32 freq,
- enum carl9170_bw bw)
+static void carl9170_set_power_cal(struct ar9170 *ar, u32 freq,
+ enum carl9170_bw bw)
{
struct ar9170_calibration_target_power_legacy *ctpl;
struct ar9170_calibration_target_power_ht *ctph;
u8 *ctpres;
int ntargets;
int idx, i, n;
- u8 ackpower, ackchains, f;
+ u8 f;
u8 pwr_freqs[AR5416_MAX_NUM_TGT_PWRS];
if (freq < 3000)
@@ -1523,32 +1523,6 @@ static int carl9170_set_power_cal(struct ar9170 *ar, u32 freq,
/* calc. conformance test limits and apply to ar->power*[] */
carl9170_calc_ctl(ar, freq, bw);
-
- /* set ACK/CTS TX power */
- carl9170_regwrite_begin(ar);
-
- if (ar->eeprom.tx_mask != 1)
- ackchains = AR9170_TX_PHY_TXCHAIN_2;
- else
- ackchains = AR9170_TX_PHY_TXCHAIN_1;
-
- if (freq < 3000)
- ackpower = ar->power_2G_ofdm[0] & 0x3f;
- else
- ackpower = ar->power_5G_leg[0] & 0x3f;
-
- carl9170_regwrite(AR9170_MAC_REG_ACK_TPC,
- 0x3c1e | ackpower << 20 | ackchains << 26);
- carl9170_regwrite(AR9170_MAC_REG_RTS_CTS_TPC,
- ackpower << 5 | ackchains << 11 |
- ackpower << 21 | ackchains << 27);
-
- carl9170_regwrite(AR9170_MAC_REG_CFEND_QOSNULL_TPC,
- ackpower << 5 | ackchains << 11 |
- ackpower << 21 | ackchains << 27);
-
- carl9170_regwrite_finish();
- return carl9170_regwrite_result();
}
int carl9170_get_noisefloor(struct ar9170 *ar)
@@ -1712,7 +1686,9 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
if (err)
return err;
- err = carl9170_set_power_cal(ar, channel->center_freq, bw);
+ carl9170_set_power_cal(ar, channel->center_freq, bw);
+
+ err = carl9170_set_mac_tpc(ar, channel);
if (err)
return err;
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index bbc813dee983..aed305177af6 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -719,6 +719,8 @@ static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
else
*chains = AR9170_TX_PHY_TXCHAIN_2;
}
+
+ *tpc = min_t(unsigned int, *tpc, ar->hw->conf.power_level * 2);
}
static __le32 carl9170_tx_physet(struct ar9170 *ar,
@@ -1245,7 +1247,7 @@ static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
tx_info = IEEE80211_SKB_CB(skb);
if (unlikely(sta_info->sleeping) &&
- !(tx_info->flags & (IEEE80211_TX_CTL_POLL_RESPONSE |
+ !(tx_info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
IEEE80211_TX_CTL_CLEAR_PS_FILT))) {
rcu_read_unlock();
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c
index d9218fe02036..ea2c737138d3 100644
--- a/drivers/net/wireless/ath/main.c
+++ b/drivers/net/wireless/ath/main.c
@@ -57,7 +57,8 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
}
EXPORT_SYMBOL(ath_rxbuf_alloc);
-void ath_printk(const char *level, const char *fmt, ...)
+void ath_printk(const char *level, const struct ath_common* common,
+ const char *fmt, ...)
{
struct va_format vaf;
va_list args;
@@ -67,7 +68,11 @@ void ath_printk(const char *level, const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &args;
- printk("%sath: %pV", level, &vaf);
+ if (common && common->hw && common->hw->wiphy)
+ printk("%sath: %s: %pV",
+ level, wiphy_name(common->hw->wiphy), &vaf);
+ else
+ printk("%sath: %pV", level, &vaf);
va_end(args);
}
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 7e45ca2e78ef..3010cee7b95a 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1533,10 +1533,9 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
/* Create the network device object. */
dev = alloc_etherdev(sizeof(*priv));
- if (!dev) {
- printk(KERN_ERR "atmel: Couldn't alloc_etherdev\n");
+ if (!dev)
return NULL;
- }
+
if (dev_alloc_name(dev, dev->name) < 0) {
printk(KERN_ERR "atmel: Couldn't get name!\n");
goto err_out_free;
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 16e8f8058155..67c13af6f206 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -932,6 +932,9 @@ struct b43_wl {
/* Flag that implement the queues stopping. */
bool tx_queue_stopped[B43_QOS_QUEUE_NUM];
+ /* firmware loading work */
+ struct work_struct firmware_load;
+
/* The device LEDs. */
struct b43_leds leds;
@@ -999,6 +1002,12 @@ static inline void b43_write16(struct b43_wldev *dev, u16 offset, u16 value)
dev->dev->write16(dev->dev, offset, value);
}
+static inline void b43_maskset16(struct b43_wldev *dev, u16 offset, u16 mask,
+ u16 set)
+{
+ b43_write16(dev, offset, (b43_read16(dev, offset) & mask) | set);
+}
+
static inline u32 b43_read32(struct b43_wldev *dev, u16 offset)
{
return dev->dev->read32(dev->dev, offset);
@@ -1009,6 +1018,12 @@ static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
dev->dev->write32(dev->dev, offset, value);
}
+static inline void b43_maskset32(struct b43_wldev *dev, u16 offset, u32 mask,
+ u32 set)
+{
+ b43_write32(dev, offset, (b43_read32(dev, offset) & mask) | set);
+}
+
static inline void b43_block_read(struct b43_wldev *dev, void *buffer,
size_t count, u16 offset, u8 reg_width)
{
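b43.h above gains b43_maskset16()/b43_maskset32(), read-modify-write helpers that the main.c hunks further down use to replace open-coded read/mask/or/write sequences. A stand-alone model of the semantics, including the two idioms used there (mask = ~FLAG, set = 0 clears a bit; mask = ~0, set = FLAG sets one):

#include <assert.h>
#include <stdint.h>

/* value = (value & mask) | set, the core of the new helpers (the real ones
 * wrap this around an MMIO read and write). */
static uint32_t maskset32(uint32_t val, uint32_t mask, uint32_t set)
{
	return (val & mask) | set;
}

#define FLAG_ENABLED 0x00000001u

int main(void)
{
	uint32_t reg = 0x1234;

	reg = maskset32(reg, ~0u, FLAG_ENABLED);   /* set a bit, keep the rest */
	assert(reg & FLAG_ENABLED);

	reg = maskset32(reg, ~FLAG_ENABLED, 0);    /* clear just that bit      */
	assert(!(reg & FLAG_ENABLED));

	return 0;
}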
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 23ffb1b9a86f..c79e6638c88d 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -580,22 +580,14 @@ void b43_tsf_read(struct b43_wldev *dev, u64 *tsf)
static void b43_time_lock(struct b43_wldev *dev)
{
- u32 macctl;
-
- macctl = b43_read32(dev, B43_MMIO_MACCTL);
- macctl |= B43_MACCTL_TBTTHOLD;
- b43_write32(dev, B43_MMIO_MACCTL, macctl);
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~0, B43_MACCTL_TBTTHOLD);
/* Commit the write */
b43_read32(dev, B43_MMIO_MACCTL);
}
static void b43_time_unlock(struct b43_wldev *dev)
{
- u32 macctl;
-
- macctl = b43_read32(dev, B43_MMIO_MACCTL);
- macctl &= ~B43_MACCTL_TBTTHOLD;
- b43_write32(dev, B43_MMIO_MACCTL, macctl);
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_TBTTHOLD, 0);
/* Commit the write */
b43_read32(dev, B43_MMIO_MACCTL);
}
@@ -2398,8 +2390,14 @@ error:
return err;
}
-static int b43_request_firmware(struct b43_wldev *dev)
+static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl);
+static void b43_one_core_detach(struct b43_bus_dev *dev);
+
+static void b43_request_firmware(struct work_struct *work)
{
+ struct b43_wl *wl = container_of(work,
+ struct b43_wl, firmware_load);
+ struct b43_wldev *dev = wl->current_dev;
struct b43_request_fw_context *ctx;
unsigned int i;
int err;
@@ -2407,23 +2405,23 @@ static int b43_request_firmware(struct b43_wldev *dev)
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
- return -ENOMEM;
+ return;
ctx->dev = dev;
ctx->req_type = B43_FWTYPE_PROPRIETARY;
err = b43_try_request_fw(ctx);
if (!err)
- goto out; /* Successfully loaded it. */
- err = ctx->fatal_failure;
- if (err)
+ goto start_ieee80211; /* Successfully loaded it. */
+ /* Was fw version known? */
+ if (ctx->fatal_failure)
goto out;
+ /* proprietary fw not found, try open source */
ctx->req_type = B43_FWTYPE_OPENSOURCE;
err = b43_try_request_fw(ctx);
if (!err)
- goto out; /* Successfully loaded it. */
- err = ctx->fatal_failure;
- if (err)
+ goto start_ieee80211; /* Successfully loaded it. */
+ if(ctx->fatal_failure)
+ if (ctx->fatal_failure)
goto out;
/* Could not find a usable firmware. Print the errors. */
@@ -2433,11 +2431,20 @@ static int b43_request_firmware(struct b43_wldev *dev)
b43err(dev->wl, errmsg);
}
b43_print_fw_helptext(dev->wl, 1);
- err = -ENOENT;
+ goto out;
+
+start_ieee80211:
+ err = ieee80211_register_hw(wl->hw);
+ if (err)
+ goto err_one_core_detach;
+ b43_leds_register(wl->current_dev);
+ goto out;
+
+err_one_core_detach:
+ b43_one_core_detach(dev->dev);
out:
kfree(ctx);
- return err;
}
static int b43_upload_microcode(struct b43_wldev *dev)
@@ -2487,10 +2494,8 @@ static int b43_upload_microcode(struct b43_wldev *dev)
b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, B43_IRQ_ALL);
/* Start the microcode PSM */
- macctl = b43_read32(dev, B43_MMIO_MACCTL);
- macctl &= ~B43_MACCTL_PSM_JMP0;
- macctl |= B43_MACCTL_PSM_RUN;
- b43_write32(dev, B43_MMIO_MACCTL, macctl);
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_PSM_JMP0,
+ B43_MACCTL_PSM_RUN);
/* Wait for the microcode to load and respond */
i = 0;
@@ -2590,10 +2595,9 @@ static int b43_upload_microcode(struct b43_wldev *dev)
return 0;
error:
- macctl = b43_read32(dev, B43_MMIO_MACCTL);
- macctl &= ~B43_MACCTL_PSM_RUN;
- macctl |= B43_MACCTL_PSM_JMP0;
- b43_write32(dev, B43_MMIO_MACCTL, macctl);
+ /* Stop the microcode PSM. */
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_PSM_RUN,
+ B43_MACCTL_PSM_JMP0);
return err;
}
@@ -2708,11 +2712,8 @@ static int b43_gpio_init(struct b43_wldev *dev)
struct ssb_device *gpiodev;
u32 mask, set;
- b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
- & ~B43_MACCTL_GPOUTSMSK);
-
- b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK)
- | 0x000F);
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_GPOUTSMSK, 0);
+ b43_maskset16(dev, B43_MMIO_GPIO_MASK, ~0, 0xF);
mask = 0x0000001F;
set = 0x0000000F;
@@ -2720,6 +2721,8 @@ static int b43_gpio_init(struct b43_wldev *dev)
mask |= 0x0060;
set |= 0x0060;
}
+ if (dev->dev->chip_id == 0x5354)
+ set &= 0xff02;
if (0 /* FIXME: conditional unknown */ ) {
b43_write16(dev, B43_MMIO_GPIO_MASK,
b43_read16(dev, B43_MMIO_GPIO_MASK)
@@ -2800,9 +2803,7 @@ void b43_mac_enable(struct b43_wldev *dev)
dev->mac_suspended--;
B43_WARN_ON(dev->mac_suspended < 0);
if (dev->mac_suspended == 0) {
- b43_write32(dev, B43_MMIO_MACCTL,
- b43_read32(dev, B43_MMIO_MACCTL)
- | B43_MACCTL_ENABLED);
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~0, B43_MACCTL_ENABLED);
b43_write32(dev, B43_MMIO_GEN_IRQ_REASON,
B43_IRQ_MAC_SUSPENDED);
/* Commit writes */
@@ -2823,9 +2824,7 @@ void b43_mac_suspend(struct b43_wldev *dev)
if (dev->mac_suspended == 0) {
b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
- b43_write32(dev, B43_MMIO_MACCTL,
- b43_read32(dev, B43_MMIO_MACCTL)
- & ~B43_MACCTL_ENABLED);
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_ENABLED, 0);
/* force pci to flush the write */
b43_read32(dev, B43_MMIO_MACCTL);
for (i = 35; i; i--) {
@@ -2931,15 +2930,10 @@ static void b43_adjust_opmode(struct b43_wldev *dev)
* so always disable it. If we want to implement PMQ,
* we need to enable it here (clear DISCPMQ) in AP mode.
*/
- if (0 /* ctl & B43_MACCTL_AP */) {
- b43_write32(dev, B43_MMIO_MACCTL,
- b43_read32(dev, B43_MMIO_MACCTL)
- & ~B43_MACCTL_DISCPMQ);
- } else {
- b43_write32(dev, B43_MMIO_MACCTL,
- b43_read32(dev, B43_MMIO_MACCTL)
- | B43_MACCTL_DISCPMQ);
- }
+ if (0 /* ctl & B43_MACCTL_AP */)
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_DISCPMQ, 0);
+ else
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~0, B43_MACCTL_DISCPMQ);
}
static void b43_rate_memory_write(struct b43_wldev *dev, u16 rate, int is_ofdm)
@@ -3044,9 +3038,6 @@ static int b43_chip_init(struct b43_wldev *dev)
macctl |= B43_MACCTL_INFRA;
b43_write32(dev, B43_MMIO_MACCTL, macctl);
- err = b43_request_firmware(dev);
- if (err)
- goto out;
err = b43_upload_microcode(dev);
if (err)
goto out; /* firmware is released later */
@@ -3083,10 +3074,8 @@ static int b43_chip_init(struct b43_wldev *dev)
if (dev->dev->core_rev < 5)
b43_write32(dev, 0x010C, 0x01000000);
- b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
- & ~B43_MACCTL_INFRA);
- b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
- | B43_MACCTL_INFRA);
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_INFRA, 0);
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~0, B43_MACCTL_INFRA);
/* Probe Response Timeout value */
/* FIXME: Default to 0, has to be set by ioctl probably... :-/ */
@@ -4178,6 +4167,7 @@ redo:
mutex_unlock(&wl->mutex);
cancel_delayed_work_sync(&dev->periodic_work);
cancel_work_sync(&wl->tx_work);
+ cancel_work_sync(&wl->firmware_load);
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (!dev || b43_status(dev) < B43_STAT_STARTED) {
@@ -4564,8 +4554,6 @@ static void b43_set_pretbtt(struct b43_wldev *dev)
/* Locking: wl->mutex */
static void b43_wireless_core_exit(struct b43_wldev *dev)
{
- u32 macctl;
-
B43_WARN_ON(dev && b43_status(dev) > B43_STAT_INITIALIZED);
if (!dev || b43_status(dev) != B43_STAT_INITIALIZED)
return;
@@ -4576,10 +4564,8 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
b43_set_status(dev, B43_STAT_UNINIT);
/* Stop the microcode PSM. */
- macctl = b43_read32(dev, B43_MMIO_MACCTL);
- macctl &= ~B43_MACCTL_PSM_RUN;
- macctl |= B43_MACCTL_PSM_JMP0;
- b43_write32(dev, B43_MMIO_MACCTL, macctl);
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_PSM_RUN,
+ B43_MACCTL_PSM_JMP0);
b43_dma_free(dev);
b43_pio_free(dev);
@@ -5341,16 +5327,13 @@ static int b43_bcma_probe(struct bcma_device *core)
if (err)
goto bcma_err_wireless_exit;
- err = ieee80211_register_hw(wl->hw);
- if (err)
- goto bcma_err_one_core_detach;
- b43_leds_register(wl->current_dev);
+ /* setup and start work to load firmware */
+ INIT_WORK(&wl->firmware_load, b43_request_firmware);
+ schedule_work(&wl->firmware_load);
bcma_out:
return err;
-bcma_err_one_core_detach:
- b43_one_core_detach(dev);
bcma_err_wireless_exit:
ieee80211_free_hw(wl->hw);
return err;
@@ -5417,18 +5400,13 @@ int b43_ssb_probe(struct ssb_device *sdev, const struct ssb_device_id *id)
if (err)
goto err_wireless_exit;
- if (first) {
- err = ieee80211_register_hw(wl->hw);
- if (err)
- goto err_one_core_detach;
- b43_leds_register(wl->current_dev);
- }
+ /* setup and start work to load firmware */
+ INIT_WORK(&wl->firmware_load, b43_request_firmware);
+ schedule_work(&wl->firmware_load);
out:
return err;
- err_one_core_detach:
- b43_one_core_detach(dev);
err_wireless_exit:
if (first)
b43_wireless_exit(dev, wl);
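The b43 main.c changes above move firmware loading (and the ieee80211_register_hw() call that used to sit directly in the probe paths) into a work item embedded in struct b43_wl; the worker then recovers its b43_wl with container_of(). A minimal user-space model of that embed-and-recover pattern (hand-rolled container_of, invented struct names):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {
	void (*fn)(struct work_item *w);
};

struct wl_state {
	const char *name;
	struct work_item firmware_load;   /* embedded like wl->firmware_load */
};

static void load_firmware(struct work_item *w)
{
	/* Recover the enclosing driver state from the embedded work item. */
	struct wl_state *wl = container_of(w, struct wl_state, firmware_load);

	printf("loading firmware for %s\n", wl->name);
}

int main(void)
{
	struct wl_state wl = { .name = "b43-phy0" };

	wl.firmware_load.fn = load_firmware;     /* INIT_WORK() analogue      */
	wl.firmware_load.fn(&wl.firmware_load);  /* the worker running the fn */
	return 0;
}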
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index bf5a43855358..108118820b36 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -85,22 +85,11 @@ static inline bool b43_nphy_ipa(struct b43_wldev *dev)
(dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ));
}
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
-static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */
+static u8 b43_nphy_get_rx_core_state(struct b43_wldev *dev)
{
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- if (dev->phy.rev >= 6) {
- if (dev->dev->chip_id == 47162)
- return txpwrctrl_tx_gain_ipa_rev5;
- return txpwrctrl_tx_gain_ipa_rev6;
- } else if (dev->phy.rev >= 5) {
- return txpwrctrl_tx_gain_ipa_rev5;
- } else {
- return txpwrctrl_tx_gain_ipa;
- }
- } else {
- return txpwrctrl_tx_gain_ipa_5g;
- }
+ return (b43_phy_read(dev, B43_NPHY_RFSEQCA) & B43_NPHY_RFSEQCA_RXEN) >>
+ B43_NPHY_RFSEQCA_RXEN_SHIFT;
}
/**************************************************
@@ -229,7 +218,7 @@ static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
reg = (i == 0) ?
B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
- b43_phy_mask(dev, reg, 0xFBFF);
+ b43_phy_set(dev, reg, 0x400);
switch (field) {
case 0:
@@ -245,7 +234,7 @@ static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
B43_NPHY_RFCTL_CMD_START);
for (j = 0; j < 100; j++) {
- if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) {
+ if (!(b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START)) {
j = 0;
break;
}
@@ -264,7 +253,7 @@ static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
B43_NPHY_RFCTL_CMD_RXTX);
for (j = 0; j < 100; j++) {
- if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) {
+ if (!(b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX)) {
j = 0;
break;
}
@@ -1231,12 +1220,12 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
u16 s[2];
if (dev->phy.rev >= 3) {
- save_regs_phy[0] = b43_phy_read(dev,
+ save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ save_regs_phy[2] = b43_phy_read(dev,
B43_NPHY_RFCTL_LUT_TRSW_UP1);
- save_regs_phy[1] = b43_phy_read(dev,
+ save_regs_phy[3] = b43_phy_read(dev,
B43_NPHY_RFCTL_LUT_TRSW_UP2);
- save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
- save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
@@ -1285,12 +1274,12 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
b43_phy_write(dev, B43_NPHY_GPIO_SEL, save_regs_phy[8]);
if (dev->phy.rev >= 3) {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]);
b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1,
- save_regs_phy[0]);
+ save_regs_phy[2]);
b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2,
- save_regs_phy[1]);
- b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[2]);
- b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[3]);
+ save_regs_phy[3]);
b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, save_regs_phy[4]);
b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]);
b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
@@ -1308,6 +1297,186 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
return out;
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */
+static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 saved_regs_phy_rfctl[2];
+ u16 saved_regs_phy[13];
+ u16 regs_to_store[] = {
+ B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
+ B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
+ B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
+ B43_NPHY_TXF_40CO_B1S0, B43_NPHY_TXF_40CO_B32S1,
+ B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
+ B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
+ };
+
+ u16 class;
+
+ u16 clip_state[2];
+ u16 clip_off[2] = { 0xFFFF, 0xFFFF };
+
+ u8 vcm_final = 0;
+ s8 offset[4];
+ s32 results[8][4] = { };
+ s32 results_min[4] = { };
+ s32 poll_results[4] = { };
+
+ u16 *rssical_radio_regs = NULL;
+ u16 *rssical_phy_regs = NULL;
+
+ u16 r; /* routing */
+ u8 rx_core_state;
+ u8 core, i, j;
+
+ class = b43_nphy_classifier(dev, 0, 0);
+ b43_nphy_classifier(dev, 7, 4);
+ b43_nphy_read_clip_detection(dev, clip_state);
+ b43_nphy_write_clip_detection(dev, clip_off);
+
+ saved_regs_phy_rfctl[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ saved_regs_phy_rfctl[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+ for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
+ saved_regs_phy[i] = b43_phy_read(dev, regs_to_store[i]);
+
+ b43_nphy_rf_control_intc_override(dev, 0, 0, 7);
+ b43_nphy_rf_control_intc_override(dev, 1, 1, 7);
+ b43_nphy_rf_control_override(dev, 0x1, 0, 0, false);
+ b43_nphy_rf_control_override(dev, 0x2, 1, 0, false);
+ b43_nphy_rf_control_override(dev, 0x80, 1, 0, false);
+ b43_nphy_rf_control_override(dev, 0x40, 1, 0, false);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_nphy_rf_control_override(dev, 0x20, 0, 0, false);
+ b43_nphy_rf_control_override(dev, 0x10, 1, 0, false);
+ } else {
+ b43_nphy_rf_control_override(dev, 0x10, 0, 0, false);
+ b43_nphy_rf_control_override(dev, 0x20, 1, 0, false);
+ }
+
+ rx_core_state = b43_nphy_get_rx_core_state(dev);
+ for (core = 0; core < 2; core++) {
+ if (!(rx_core_state & (1 << core)))
+ continue;
+ r = core ? B2056_RX1 : B2056_RX0;
+ b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 0, 2);
+ b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 1, 2);
+ for (i = 0; i < 8; i++) {
+ b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
+ i << 2);
+ b43_nphy_poll_rssi(dev, 2, results[i], 8);
+ }
+ for (i = 0; i < 4; i++) {
+ s32 curr;
+ s32 mind = 40;
+ s32 minpoll = 249;
+ u8 minvcm = 0;
+ if (2 * core != i)
+ continue;
+ for (j = 0; j < 8; j++) {
+ curr = results[j][i] * results[j][i] +
+ results[j][i + 1] * results[j][i];
+ if (curr < mind) {
+ mind = curr;
+ minvcm = j;
+ }
+ if (results[j][i] < minpoll)
+ minpoll = results[j][i];
+ }
+ vcm_final = minvcm;
+ results_min[i] = minpoll;
+ }
+ b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
+ vcm_final << 2);
+ for (i = 0; i < 4; i++) {
+ if (core != i / 2)
+ continue;
+ offset[i] = -results[vcm_final][i];
+ if (offset[i] < 0)
+ offset[i] = -((abs(offset[i]) + 4) / 8);
+ else
+ offset[i] = (offset[i] + 4) / 8;
+ if (results_min[i] == 248)
+ offset[i] = -32;
+ b43_nphy_scale_offset_rssi(dev, 0, offset[i],
+ (i / 2 == 0) ? 1 : 2,
+ (i % 2 == 0) ? 0 : 1,
+ 2);
+ }
+ }
+ for (core = 0; core < 2; core++) {
+ if (!(rx_core_state & (1 << core)))
+ continue;
+ for (i = 0; i < 2; i++) {
+ b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 0, i);
+ b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 1, i);
+ b43_nphy_poll_rssi(dev, i, poll_results, 8);
+ for (j = 0; j < 4; j++) {
+ if (j / 2 == core)
+ offset[j] = 232 - poll_results[j];
+ if (offset[j] < 0)
+ offset[j] = -((abs(offset[j]) + 4) / 8);
+ else
+ offset[j] = (offset[j] + 4) / 8;
+ b43_nphy_scale_offset_rssi(dev, 0,
+ offset[2 * core], core + 1, j % 2, i);
+ }
+ }
+ }
+
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, saved_regs_phy_rfctl[0]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, saved_regs_phy_rfctl[1]);
+
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+
+ b43_phy_set(dev, B43_NPHY_TXF_40CO_B1S1, 0x1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_START);
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, ~0x1);
+
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_RXTX);
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, ~0x1);
+
+ for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
+ b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]);
+
+ /* Store for future configuration */
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
+ rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
+ } else {
+ rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
+ rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
+ }
+ rssical_radio_regs[0] = b43_radio_read(dev, 0x602B);
+ rssical_radio_regs[1] = b43_radio_read(dev, 0x702B);
+ rssical_phy_regs[0] = b43_phy_read(dev, B43_NPHY_RSSIMC_0I_RSSI_Z);
+ rssical_phy_regs[1] = b43_phy_read(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z);
+ rssical_phy_regs[2] = b43_phy_read(dev, B43_NPHY_RSSIMC_1I_RSSI_Z);
+ rssical_phy_regs[3] = b43_phy_read(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z);
+ rssical_phy_regs[4] = b43_phy_read(dev, B43_NPHY_RSSIMC_0I_RSSI_X);
+ rssical_phy_regs[5] = b43_phy_read(dev, B43_NPHY_RSSIMC_0Q_RSSI_X);
+ rssical_phy_regs[6] = b43_phy_read(dev, B43_NPHY_RSSIMC_1I_RSSI_X);
+ rssical_phy_regs[7] = b43_phy_read(dev, B43_NPHY_RSSIMC_1Q_RSSI_X);
+ rssical_phy_regs[8] = b43_phy_read(dev, B43_NPHY_RSSIMC_0I_RSSI_Y);
+ rssical_phy_regs[9] = b43_phy_read(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y);
+ rssical_phy_regs[10] = b43_phy_read(dev, B43_NPHY_RSSIMC_1I_RSSI_Y);
+ rssical_phy_regs[11] = b43_phy_read(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y);
+
+ /* Remember for which channel we store configuration */
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ nphy->rssical_chanspec_2G.center_freq = dev->phy.channel_freq;
+ else
+ nphy->rssical_chanspec_5G.center_freq = dev->phy.channel_freq;
+
+ /* End of calibration, restore configuration */
+ b43_nphy_classifier(dev, 7, class);
+ b43_nphy_write_clip_detection(dev, clip_state);
+}
+
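
A sketch of what the offset math in the per-core loop above does: the negated poll result is divided by 8 and rounded half away from zero. The helper name is hypothetical and not part of the driver; this is illustration only.

    #include <linux/kernel.h>	/* abs() */
    #include <linux/types.h>	/* s8, s32 */

    /* Hypothetical helper: divide by 8, rounding half away from zero,
     * matching the offset[] computation in b43_nphy_rev3_rssi_cal(). */
    static inline s8 rssi_offset_round(s32 raw)
    {
    	if (raw < 0)
    		return -((abs(raw) + 4) / 8);
    	return (raw + 4) / 8;
    }
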
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */
static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
{
@@ -1472,12 +1641,6 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
b43_nphy_reset_cca(dev);
}
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */
-static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
-{
- /* TODO */
-}
-
/*
* RSSI Calibration
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal
@@ -2229,27 +2392,12 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
*/
for (i = 0; i < 2; i++) {
- if (dev->phy.rev >= 3) {
- if (b43_nphy_ipa(dev)) {
- txgain = *(b43_nphy_get_ipa_gain_table(dev) +
- txpi[i]);
- } else if (b43_current_band(dev->wl) ==
- IEEE80211_BAND_5GHZ) {
- /* FIXME: use 5GHz tables */
- txgain =
- b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
- } else {
- if (dev->phy.rev >= 5 &&
- sprom->fem.ghz5.extpa_gain == 3)
- ; /* FIXME: 5GHz_txgain_HiPwrEPA */
- txgain =
- b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
- }
+ txgain = *(b43_nphy_get_tx_gain_table(dev) + txpi[i]);
+
+ if (dev->phy.rev >= 3)
radio_gain = (txgain >> 16) & 0x1FFFF;
- } else {
- txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]];
+ else
radio_gain = (txgain >> 16) & 0x1FFF;
- }
if (dev->phy.rev >= 7)
dac_gain = (txgain >> 8) & 0x7;
@@ -2420,55 +2568,252 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
nphy->pwr_ctl_info[1].idle_tssi_2g = (tmp >> 8) & 0xFF;
}
-static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
+/* http://bcm-v4.sipsolutions.net/PHY/N/TxPwrLimitToTbl */
+static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev)
{
- struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = dev->phy.n;
- const u32 *table = NULL;
-#if 0
- TODO: b43_ntab_papd_pga_gain_delta_ipa_2*
- u32 rfpwr_offset;
- u8 pga_gain;
- int i;
-#endif
+ u8 idx, delta;
+ u8 i, stf_mode;
- if (phy->rev >= 3) {
- if (b43_nphy_ipa(dev)) {
- table = b43_nphy_get_ipa_gain_table(dev);
+ for (i = 0; i < 4; i++)
+ nphy->adj_pwr_tbl[i] = nphy->tx_power_offset[i];
+
+ for (stf_mode = 0; stf_mode < 4; stf_mode++) {
+ delta = 0;
+ switch (stf_mode) {
+ case 0:
+ if (dev->phy.is_40mhz && dev->phy.rev >= 5) {
+ idx = 68;
+ } else {
+ delta = 1;
+ idx = dev->phy.is_40mhz ? 52 : 4;
+ }
+ break;
+ case 1:
+ idx = dev->phy.is_40mhz ? 76 : 28;
+ break;
+ case 2:
+ idx = dev->phy.is_40mhz ? 84 : 36;
+ break;
+ case 3:
+ idx = dev->phy.is_40mhz ? 92 : 44;
+ break;
+ }
+
+ for (i = 0; i < 20; i++) {
+ nphy->adj_pwr_tbl[4 + 4 * i + stf_mode] =
+ nphy->tx_power_offset[idx];
+ if (i == 0)
+ idx += delta;
+ if (i == 14)
+ idx += 1 - delta;
+ if (i == 3 || i == 4 || i == 7 || i == 8 || i == 11 ||
+ i == 13)
+ idx += 1;
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlSetup */
+static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
+ s16 a1[2], b0[2], b1[2];
+ u8 idle[2];
+ s8 target[2];
+ s32 num, den, pwr;
+ u32 regval[64];
+
+ u16 freq = dev->phy.channel_freq;
+ u16 tmp;
+ u16 r; /* routing */
+ u8 i, c;
+
+ if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12) {
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~0, 0x200000);
+ b43_read32(dev, B43_MMIO_MACCTL);
+ udelay(1);
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ b43_phy_set(dev, B43_NPHY_TSSIMODE, B43_NPHY_TSSIMODE_EN);
+ if (dev->phy.rev >= 3)
+ b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD,
+ ~B43_NPHY_TXPCTL_CMD_PCTLEN & 0xFFFF);
+ else
+ b43_phy_set(dev, B43_NPHY_TXPCTL_CMD,
+ B43_NPHY_TXPCTL_CMD_PCTLEN);
+
+ if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12)
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~0x200000, 0);
+
+ if (sprom->revision < 4) {
+ idle[0] = nphy->pwr_ctl_info[0].idle_tssi_2g;
+ idle[1] = nphy->pwr_ctl_info[1].idle_tssi_2g;
+ target[0] = target[1] = 52;
+ a1[0] = a1[1] = -424;
+ b0[0] = b0[1] = 5612;
+ b1[0] = b1[1] = -1393;
+ } else {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ for (c = 0; c < 2; c++) {
+ idle[c] = nphy->pwr_ctl_info[c].idle_tssi_2g;
+ target[c] = sprom->core_pwr_info[c].maxpwr_2g;
+ a1[c] = sprom->core_pwr_info[c].pa_2g[0];
+ b0[c] = sprom->core_pwr_info[c].pa_2g[1];
+ b1[c] = sprom->core_pwr_info[c].pa_2g[2];
+ }
+ } else if (freq >= 4900 && freq < 5100) {
+ for (c = 0; c < 2; c++) {
+ idle[c] = nphy->pwr_ctl_info[c].idle_tssi_5g;
+ target[c] = sprom->core_pwr_info[c].maxpwr_5gl;
+ a1[c] = sprom->core_pwr_info[c].pa_5gl[0];
+ b0[c] = sprom->core_pwr_info[c].pa_5gl[1];
+ b1[c] = sprom->core_pwr_info[c].pa_5gl[2];
+ }
+ } else if (freq >= 5100 && freq < 5500) {
+ for (c = 0; c < 2; c++) {
+ idle[c] = nphy->pwr_ctl_info[c].idle_tssi_5g;
+ target[c] = sprom->core_pwr_info[c].maxpwr_5g;
+ a1[c] = sprom->core_pwr_info[c].pa_5g[0];
+ b0[c] = sprom->core_pwr_info[c].pa_5g[1];
+ b1[c] = sprom->core_pwr_info[c].pa_5g[2];
+ }
+ } else if (freq >= 5500) {
+ for (c = 0; c < 2; c++) {
+ idle[c] = nphy->pwr_ctl_info[c].idle_tssi_5g;
+ target[c] = sprom->core_pwr_info[c].maxpwr_5gh;
+ a1[c] = sprom->core_pwr_info[c].pa_5gh[0];
+ b0[c] = sprom->core_pwr_info[c].pa_5gh[1];
+ b1[c] = sprom->core_pwr_info[c].pa_5gh[2];
+ }
} else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- if (phy->rev == 3)
- table = b43_ntab_tx_gain_rev3_5ghz;
- if (phy->rev == 4)
- table = b43_ntab_tx_gain_rev4_5ghz;
- else
- table = b43_ntab_tx_gain_rev5plus_5ghz;
+ idle[0] = nphy->pwr_ctl_info[0].idle_tssi_5g;
+ idle[1] = nphy->pwr_ctl_info[1].idle_tssi_5g;
+ target[0] = target[1] = 52;
+ a1[0] = a1[1] = -424;
+ b0[0] = b0[1] = 5612;
+ b1[0] = b1[1] = -1393;
+ }
+ }
+ /* target[0] = target[1] = nphy->tx_power_max; */
+
+ if (dev->phy.rev >= 3) {
+ if (sprom->fem.ghz2.tssipos)
+ b43_phy_set(dev, B43_NPHY_TXPCTL_ITSSI, 0x4000);
+ if (dev->phy.rev >= 7) {
+ for (c = 0; c < 2; c++) {
+ r = c ? 0x190 : 0x170;
+ if (b43_nphy_ipa(dev))
+ b43_radio_write(dev, r + 0x9, (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? 0xE : 0xC);
+ }
+ } else {
+ if (b43_nphy_ipa(dev)) {
+ tmp = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
+ b43_radio_write(dev,
+ B2056_TX0 | B2056_TX_TX_SSI_MUX, tmp);
+ b43_radio_write(dev,
+ B2056_TX1 | B2056_TX_TX_SSI_MUX, tmp);
} else {
- table = b43_ntab_tx_gain_rev3plus_2ghz;
+ b43_radio_write(dev,
+ B2056_TX0 | B2056_TX_TX_SSI_MUX, 0x11);
+ b43_radio_write(dev,
+ B2056_TX1 | B2056_TX_TX_SSI_MUX, 0x11);
}
}
+ }
+
+ if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12) {
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~0, 0x200000);
+ b43_read32(dev, B43_MMIO_MACCTL);
+ udelay(1);
+ }
+
+ if (dev->phy.rev >= 7) {
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
+ ~B43_NPHY_TXPCTL_CMD_INIT, 0x19);
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
+ ~B43_NPHY_TXPCTL_INIT_PIDXI1, 0x19);
} else {
- table = b43_ntab_tx_gain_rev0_1_2;
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
+ ~B43_NPHY_TXPCTL_CMD_INIT, 0x40);
+ if (dev->phy.rev > 1)
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
+ ~B43_NPHY_TXPCTL_INIT_PIDXI1, 0x40);
+ }
+
+ if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12)
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~0x200000, 0);
+
+ b43_phy_write(dev, B43_NPHY_TXPCTL_N,
+ 0xF0 << B43_NPHY_TXPCTL_N_TSSID_SHIFT |
+ 3 << B43_NPHY_TXPCTL_N_NPTIL2_SHIFT);
+ b43_phy_write(dev, B43_NPHY_TXPCTL_ITSSI,
+ idle[0] << B43_NPHY_TXPCTL_ITSSI_0_SHIFT |
+ idle[1] << B43_NPHY_TXPCTL_ITSSI_1_SHIFT |
+ B43_NPHY_TXPCTL_ITSSI_BINF);
+ b43_phy_write(dev, B43_NPHY_TXPCTL_TPWR,
+ target[0] << B43_NPHY_TXPCTL_TPWR_0_SHIFT |
+ target[1] << B43_NPHY_TXPCTL_TPWR_1_SHIFT);
+
+ for (c = 0; c < 2; c++) {
+ for (i = 0; i < 64; i++) {
+ num = 8 * (16 * b0[c] + b1[c] * i);
+ den = 32768 + a1[c] * i;
+ pwr = max((4 * num + den / 2) / den, -8);
+ if (dev->phy.rev < 3 && (i <= (31 - idle[c] + 1)))
+ pwr = max(pwr, target[c] + 1);
+ regval[i] = pwr;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB32(26 + c, 0), 64, regval);
}
+
+ b43_nphy_tx_prepare_adjusted_power_table(dev);
+ /*
+ b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84, nphy->adj_pwr_tbl);
+ b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84, nphy->adj_pwr_tbl);
+ */
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+}
+
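
The 64-entry estimate tables written just above are derived from the per-core SPROM PA coefficients by a fixed-point fraction. A minimal sketch of that per-index computation follows; the function name is hypothetical, and reading the result in the driver's quarter-dB convention is an assumption, not stated by the patch.

    #include <linux/kernel.h>	/* max() */
    #include <linux/types.h>

    /* Illustration only: mirrors the num/den/pwr math in
     * b43_nphy_tx_power_ctl_setup() for one TSSI code idx (0..63) and one
     * core's PA parameters a1/b0/b1; the result is clamped to a floor of -8
     * exactly as in the loop above. */
    static s32 b43_example_tssi_to_pwr_est(s16 a1, s16 b0, s16 b1, u8 idx)
    {
    	s32 num = 8 * (16 * b0 + b1 * idx);
    	s32 den = 32768 + a1 * idx;

    	return max((4 * num + den / 2) / den, -8);
    }
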
+static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+
+ const u32 *table = NULL;
+ u32 rfpwr_offset;
+ u8 pga_gain;
+ int i;
+
+ table = b43_nphy_get_tx_gain_table(dev);
b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table);
b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table);
if (phy->rev >= 3) {
#if 0
nphy->gmval = (table[0] >> 16) & 0x7000;
+#endif
for (i = 0; i < 128; i++) {
pga_gain = (table[i] >> 24) & 0xF;
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
- rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
+ rfpwr_offset =
+ b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
else
- rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_5g[pga_gain];
+ rfpwr_offset =
+ 0; /* FIXME */
b43_ntab_write(dev, B43_NTAB32(26, 576 + i),
rfpwr_offset);
b43_ntab_write(dev, B43_NTAB32(27, 576 + i),
rfpwr_offset);
}
-#endif
}
}
@@ -3139,32 +3484,13 @@ static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;
for (i = 0; i < 2; ++i) {
+ table = b43_nphy_get_tx_gain_table(dev);
if (dev->phy.rev >= 3) {
- enum ieee80211_band band =
- b43_current_band(dev->wl);
-
- if (b43_nphy_ipa(dev)) {
- table = b43_nphy_get_ipa_gain_table(dev);
- } else {
- if (band == IEEE80211_BAND_5GHZ) {
- if (dev->phy.rev == 3)
- table = b43_ntab_tx_gain_rev3_5ghz;
- else if (dev->phy.rev == 4)
- table = b43_ntab_tx_gain_rev4_5ghz;
- else
- table = b43_ntab_tx_gain_rev5plus_5ghz;
- } else {
- table = b43_ntab_tx_gain_rev3plus_2ghz;
- }
- }
-
target.ipa[i] = (table[index[i]] >> 16) & 0xF;
target.pad[i] = (table[index[i]] >> 20) & 0xF;
target.pga[i] = (table[index[i]] >> 24) & 0xF;
target.txgm[i] = (table[index[i]] >> 28) & 0xF;
} else {
- table = b43_ntab_tx_gain_rev0_1_2;
-
target.ipa[i] = (table[index[i]] >> 16) & 0x3;
target.pad[i] = (table[index[i]] >> 18) & 0x3;
target.pga[i] = (table[index[i]] >> 20) & 0x7;
@@ -3968,13 +4294,10 @@ static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
#endif
}
- b43_write32(dev, B43_MMIO_MACCTL,
- b43_read32(dev, B43_MMIO_MACCTL) &
- ~B43_MACCTL_GPOUTSMSK);
- b43_write16(dev, B43_MMIO_GPIO_MASK,
- b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00);
- b43_write16(dev, B43_MMIO_GPIO_CONTROL,
- b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00);
+ b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_GPOUTSMSK, 0);
+ b43_maskset16(dev, B43_MMIO_GPIO_MASK, ~0, 0xFC00);
+ b43_maskset16(dev, B43_MMIO_GPIO_CONTROL, (~0xFC00 & 0xFFFF),
+ 0);
if (init) {
b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
@@ -4110,7 +4433,7 @@ int b43_phy_initn(struct b43_wldev *dev)
b43_nphy_tx_power_ctrl(dev, false);
b43_nphy_tx_power_fix(dev);
b43_nphy_tx_power_ctl_idle_tssi(dev);
- /* TODO N PHY TX Power Control Setup */
+ b43_nphy_tx_power_ctl_setup(dev);
b43_nphy_tx_gain_table_upload(dev);
if (nphy->phyrxchain != 3)
@@ -4530,8 +4853,7 @@ static void b43_nphy_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask,
{
check_phyreg(dev, reg);
b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
- b43_write16(dev, B43_MMIO_PHY_DATA,
- (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set);
+ b43_maskset16(dev, B43_MMIO_PHY_DATA, mask, set);
}
static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg)
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 5de8f74cc02f..fd12b386fea1 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -798,6 +798,7 @@ struct b43_phy_n {
bool txpwrctrl;
bool pwg_gain_5ghz;
u8 tx_pwr_idx[2];
+ s8 tx_power_offset[101];
u16 adj_pwr_tbl[84];
u16 txcal_bbmult;
u16 txiqlocal_bestc[11];
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index f7def13524dd..f0d8377429c6 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -2214,7 +2214,7 @@ static const u16 b43_ntab_antswctl2g_r3[4][32] = {
};
/* TX gain tables */
-const u32 b43_ntab_tx_gain_rev0_1_2[] = {
+static const u32 b43_ntab_tx_gain_rev0_1_2[] = {
0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44,
0x03c82a42, 0x03c82944, 0x03c82942, 0x03c82844,
@@ -2249,7 +2249,7 @@ const u32 b43_ntab_tx_gain_rev0_1_2[] = {
0x03801442, 0x03801344, 0x03801342, 0x00002b00,
};
-const u32 b43_ntab_tx_gain_rev3plus_2ghz[] = {
+static const u32 b43_ntab_tx_gain_rev3plus_2ghz[] = {
0x1f410044, 0x1f410042, 0x1f410040, 0x1f41003e,
0x1f41003c, 0x1f41003b, 0x1f410039, 0x1f410037,
0x1e410044, 0x1e410042, 0x1e410040, 0x1e41003e,
@@ -2284,7 +2284,7 @@ const u32 b43_ntab_tx_gain_rev3plus_2ghz[] = {
0x1041003c, 0x1041003b, 0x10410039, 0x10410037,
};
-const u32 b43_ntab_tx_gain_rev3_5ghz[] = {
+static const u32 b43_ntab_tx_gain_rev3_5ghz[] = {
0xcff70044, 0xcff70042, 0xcff70040, 0xcff7003e,
0xcff7003c, 0xcff7003b, 0xcff70039, 0xcff70037,
0xcef70044, 0xcef70042, 0xcef70040, 0xcef7003e,
@@ -2319,7 +2319,7 @@ const u32 b43_ntab_tx_gain_rev3_5ghz[] = {
0xc0f7003c, 0xc0f7003b, 0xc0f70039, 0xc0f70037,
};
-const u32 b43_ntab_tx_gain_rev4_5ghz[] = {
+static const u32 b43_ntab_tx_gain_rev4_5ghz[] = {
0x2ff20044, 0x2ff20042, 0x2ff20040, 0x2ff2003e,
0x2ff2003c, 0x2ff2003b, 0x2ff20039, 0x2ff20037,
0x2ef20044, 0x2ef20042, 0x2ef20040, 0x2ef2003e,
@@ -2354,7 +2354,7 @@ const u32 b43_ntab_tx_gain_rev4_5ghz[] = {
0x20d2003a, 0x20d20038, 0x20d20036, 0x20d20034,
};
-const u32 b43_ntab_tx_gain_rev5plus_5ghz[] = {
+static const u32 b43_ntab_tx_gain_rev5plus_5ghz[] = {
0x0f62004a, 0x0f620048, 0x0f620046, 0x0f620044,
0x0f620042, 0x0f620040, 0x0f62003e, 0x0f62003c,
0x0e620044, 0x0e620042, 0x0e620040, 0x0e62003e,
@@ -2389,7 +2389,7 @@ const u32 b43_ntab_tx_gain_rev5plus_5ghz[] = {
0x0062003b, 0x00620039, 0x00620037, 0x00620035,
};
-const u32 txpwrctrl_tx_gain_ipa[] = {
+static const u32 txpwrctrl_tx_gain_ipa[] = {
0x5ff7002d, 0x5ff7002b, 0x5ff7002a, 0x5ff70029,
0x5ff70028, 0x5ff70027, 0x5ff70026, 0x5ff70025,
0x5ef7002d, 0x5ef7002b, 0x5ef7002a, 0x5ef70029,
@@ -2424,7 +2424,7 @@ const u32 txpwrctrl_tx_gain_ipa[] = {
0x50f70028, 0x50f70027, 0x50f70026, 0x50f70025,
};
-const u32 txpwrctrl_tx_gain_ipa_rev5[] = {
+static const u32 txpwrctrl_tx_gain_ipa_rev5[] = {
0x1ff7002d, 0x1ff7002b, 0x1ff7002a, 0x1ff70029,
0x1ff70028, 0x1ff70027, 0x1ff70026, 0x1ff70025,
0x1ef7002d, 0x1ef7002b, 0x1ef7002a, 0x1ef70029,
@@ -2459,7 +2459,7 @@ const u32 txpwrctrl_tx_gain_ipa_rev5[] = {
0x10f70028, 0x10f70027, 0x10f70026, 0x10f70025,
};
-const u32 txpwrctrl_tx_gain_ipa_rev6[] = {
+static const u32 txpwrctrl_tx_gain_ipa_rev6[] = {
0x0ff7002d, 0x0ff7002b, 0x0ff7002a, 0x0ff70029,
0x0ff70028, 0x0ff70027, 0x0ff70026, 0x0ff70025,
0x0ef7002d, 0x0ef7002b, 0x0ef7002a, 0x0ef70029,
@@ -2494,7 +2494,7 @@ const u32 txpwrctrl_tx_gain_ipa_rev6[] = {
0x00f70028, 0x00f70027, 0x00f70026, 0x00f70025,
};
-const u32 txpwrctrl_tx_gain_ipa_5g[] = {
+static const u32 txpwrctrl_tx_gain_ipa_5g[] = {
0x7ff70035, 0x7ff70033, 0x7ff70032, 0x7ff70031,
0x7ff7002f, 0x7ff7002e, 0x7ff7002d, 0x7ff7002b,
0x7ff7002a, 0x7ff70029, 0x7ff70028, 0x7ff70027,
@@ -2529,6 +2529,11 @@ const u32 txpwrctrl_tx_gain_ipa_5g[] = {
0x70f70021, 0x70f70020, 0x70f70020, 0x70f7001f,
};
+const s8 b43_ntab_papd_pga_gain_delta_ipa_2g[] = {
+ -114, -108, -98, -91, -84, -78, -70, -62,
+ -54, -46, -39, -31, -23, -15, -8, 0
+};
+
const u16 tbl_iqcal_gainparams[2][9][8] = {
{
{ 0x000, 0, 0, 2, 0x69, 0x69, 0x69, 0x69 },
@@ -2739,11 +2744,11 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
{ 0x0001, 0, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0002 (fls 2) */
{ 0x0002, 1, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0004 (fls 3) */
{ 0x0004, 2, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0008 (fls 4) */
- { 0x0016, 4, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0010 (fls 5) */
+ { 0x0010, 4, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0010 (fls 5) */
{ 0x0020, 5, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0020 (fls 6) */
{ 0x0040, 6, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0040 (fls 7) */
- { 0x0080, 6, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0080 (fls 8) */
- { 0x0100, 7, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0100 (fls 9) */
+ { 0x0080, 7, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0080 (fls 8) */
+ { 0x0100, 8, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0100 (fls 9) */
{ 0x0007, 0, 0xE7, 0xF8, 0xEC, 0xFA }, /* field == 0x0200 (fls 10) */
{ 0x0070, 4, 0xE7, 0xF8, 0xEC, 0xFA }, /* field == 0x0400 (fls 11) */
{ 0xE000, 13, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0800 (fls 12) */
@@ -3126,6 +3131,53 @@ void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
B43_WARN_ON(1);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
+static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
+{
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (dev->phy.rev >= 6) {
+ if (dev->dev->chip_id == 47162)
+ return txpwrctrl_tx_gain_ipa_rev5;
+ return txpwrctrl_tx_gain_ipa_rev6;
+ } else if (dev->phy.rev >= 5) {
+ return txpwrctrl_tx_gain_ipa_rev5;
+ } else {
+ return txpwrctrl_tx_gain_ipa;
+ }
+ } else {
+ return txpwrctrl_tx_gain_ipa_5g;
+ }
+}
+
+const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
+{
+ enum ieee80211_band band = b43_current_band(dev->wl);
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
+ if (dev->phy.rev < 3)
+ return b43_ntab_tx_gain_rev0_1_2;
+
+ /* rev 3+ */
+ if ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
+ (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ)) {
+ return b43_nphy_get_ipa_gain_table(dev);
+ } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (dev->phy.rev == 3)
+ return b43_ntab_tx_gain_rev3_5ghz;
+ if (dev->phy.rev == 4)
+ return sprom->fem.ghz5.extpa_gain == 3 ?
+ b43_ntab_tx_gain_rev4_5ghz :
+ b43_ntab_tx_gain_rev4_5ghz; /* FIXME */
+ else
+ return b43_ntab_tx_gain_rev5plus_5ghz;
+ } else {
+ if (dev->phy.rev >= 5 && sprom->fem.ghz5.extpa_gain == 3)
+ return b43_ntab_tx_gain_rev3plus_2ghz; /* FIXME */
+ else
+ return b43_ntab_tx_gain_rev3plus_2ghz;
+ }
+}
+
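
For reference, the callers visible earlier in this patch (b43_nphy_tx_power_fix() and b43_nphy_get_tx_gains()) treat each rev3+ entry of the table returned here as four packed nibbles. A minimal sketch of that unpacking; the struct and function names are hypothetical, only the shift/mask layout is taken from the patch.

    #include <linux/types.h>

    /* Illustration only: bit layout of one rev3+ TX gain table entry as
     * used by b43_nphy_get_tx_gains() in phy_n.c. */
    struct nphy_txgain_fields {
    	u8 ipa;		/* bits 16..19 */
    	u8 pad;		/* bits 20..23 */
    	u8 pga;		/* bits 24..27 */
    	u8 txgm;	/* bits 28..31 */
    };

    static struct nphy_txgain_fields unpack_txgain_rev3(u32 entry)
    {
    	struct nphy_txgain_fields f = {
    		.ipa  = (entry >> 16) & 0xF,
    		.pad  = (entry >> 20) & 0xF,
    		.pga  = (entry >> 24) & 0xF,
    		.txgm = (entry >> 28) & 0xF,
    	};
    	return f;
    }
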
struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
struct b43_wldev *dev, bool ghz5, bool ext_lna)
{
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 97038c481930..f348953c0230 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -177,16 +177,10 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev);
void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev);
-extern const u32 b43_ntab_tx_gain_rev0_1_2[];
-extern const u32 b43_ntab_tx_gain_rev3plus_2ghz[];
-extern const u32 b43_ntab_tx_gain_rev3_5ghz[];
-extern const u32 b43_ntab_tx_gain_rev4_5ghz[];
-extern const u32 b43_ntab_tx_gain_rev5plus_5ghz[];
-
-extern const u32 txpwrctrl_tx_gain_ipa[];
-extern const u32 txpwrctrl_tx_gain_ipa_rev5[];
-extern const u32 txpwrctrl_tx_gain_ipa_rev6[];
-extern const u32 txpwrctrl_tx_gain_ipa_5g[];
+const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev);
+
+extern const s8 b43_ntab_papd_pga_gain_delta_ipa_2g[];
+
extern const u16 tbl_iqcal_gainparams[2][9][8];
extern const struct nphy_txiqcal_ladder ladder_lo[];
extern const struct nphy_txiqcal_ladder ladder_iq[];
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 98e3d44400c6..a29da674e69d 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -581,6 +581,9 @@ struct b43legacy_wl {
struct mutex mutex; /* locks wireless core state */
spinlock_t leds_lock; /* lock for leds */
+ /* firmware loading work */
+ struct work_struct firmware_load;
+
/* We can only have one operating interface (802.11 core)
* at a time. General information about this interface follows.
*/
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 75e70bce40f6..df7e16dfb36c 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1557,8 +1557,15 @@ err_format:
return -EPROTO;
}
-static int b43legacy_request_firmware(struct b43legacy_wldev *dev)
+static int b43legacy_one_core_attach(struct ssb_device *dev,
+ struct b43legacy_wl *wl);
+static void b43legacy_one_core_detach(struct ssb_device *dev);
+
+static void b43legacy_request_firmware(struct work_struct *work)
{
+ struct b43legacy_wl *wl = container_of(work,
+ struct b43legacy_wl, firmware_load);
+ struct b43legacy_wldev *dev = wl->current_dev;
struct b43legacy_firmware *fw = &dev->fw;
const u8 rev = dev->dev->id.revision;
const char *filename;
@@ -1624,8 +1631,14 @@ static int b43legacy_request_firmware(struct b43legacy_wldev *dev)
if (err)
goto err_load;
}
+ err = ieee80211_register_hw(wl->hw);
+ if (err)
+ goto err_one_core_detach;
+ return;
- return 0;
+err_one_core_detach:
+ b43legacy_one_core_detach(dev->dev);
+ goto error;
err_load:
b43legacy_print_fw_helptext(dev->wl);
@@ -1639,7 +1652,7 @@ err_no_initvals:
error:
b43legacy_release_firmware(dev);
- return err;
+ return;
}
static int b43legacy_upload_microcode(struct b43legacy_wldev *dev)
@@ -2153,9 +2166,6 @@ static int b43legacy_chip_init(struct b43legacy_wldev *dev)
macctl |= B43legacy_MACCTL_INFRA;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl);
- err = b43legacy_request_firmware(dev);
- if (err)
- goto out;
err = b43legacy_upload_microcode(dev);
if (err)
goto out; /* firmware is released later */
@@ -3860,17 +3870,13 @@ static int b43legacy_probe(struct ssb_device *dev,
if (err)
goto err_wireless_exit;
- if (first) {
- err = ieee80211_register_hw(wl->hw);
- if (err)
- goto err_one_core_detach;
- }
+ /* setup and start work to load firmware */
+ INIT_WORK(&wl->firmware_load, b43legacy_request_firmware);
+ schedule_work(&wl->firmware_load);
out:
return err;
-err_one_core_detach:
- b43legacy_one_core_detach(dev);
err_wireless_exit:
if (first)
b43legacy_wireless_exit(dev, wl);
@@ -3885,6 +3891,7 @@ static void b43legacy_remove(struct ssb_device *dev)
/* We must cancel any work here before unregistering from ieee80211,
* as the ieee80211 unreg will destroy the workqueue. */
cancel_work_sync(&wldev->restart_work);
+ cancel_work_sync(&wl->firmware_load);
B43legacy_WARN_ON(!wl);
if (wl->current_dev == wldev)
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 96faaef3661b..950334197f40 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -1860,7 +1860,7 @@ void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev)
* which accounts for the factor of 4 */
#define REG_MAX_PWR 20
max_pwr = min(REG_MAX_PWR * 4
- - dev->dev->bus->sprom.antenna_gain.ghz24.a0
+ - dev->dev->bus->sprom.antenna_gain.a0
- 0x6, max_pwr);
/* find the desired power in Q5.2 - power_level is in dBm
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index cd6375de2a60..c5104533e24e 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -26,16 +26,25 @@ config BRCMFMAC
it'll be called brcmfmac.ko.
config BRCMFMAC_SDIO
- bool "SDIO bus interface support for FullMAC"
+ bool "SDIO bus interface support for FullMAC driver"
depends on MMC
depends on BRCMFMAC
select FW_LOADER
default y
---help---
This option enables the SDIO bus interface support for Broadcom
- FullMAC WLAN driver.
- Say Y if you want to use brcmfmac for a compatible SDIO interface
- wireless card.
+ IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
+ use the driver for an SDIO wireless card.
+
+config BRCMFMAC_USB
+ bool "USB bus interface support for FullMAC driver"
+ depends on USB
+ depends on BRCMFMAC
+ select FW_LOADER
+ ---help---
+ This option enables the USB bus interface support for Broadcom
+ IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
+ use the driver for a USB wireless card.
config BRCMDBG
bool "Broadcom driver debug functions"
diff --git a/drivers/net/wireless/brcm80211/Makefile b/drivers/net/wireless/brcm80211/Makefile
index f41c047eca82..b987920e982e 100644
--- a/drivers/net/wireless/brcm80211/Makefile
+++ b/drivers/net/wireless/brcm80211/Makefile
@@ -16,7 +16,7 @@
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# common flags
-subdir-ccflags-$(CONFIG_BRCMDBG) += -DBCMDBG
+subdir-ccflags-$(CONFIG_BRCMDBG) += -DDEBUG
obj-$(CONFIG_BRCMUTIL) += brcmutil/
obj-$(CONFIG_BRCMFMAC) += brcmfmac/
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 9ca9ea1135ea..abb48032753b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -19,6 +19,8 @@ ccflags-y += \
-Idrivers/net/wireless/brcm80211/brcmfmac \
-Idrivers/net/wireless/brcm80211/include
+ccflags-y += -D__CHECK_ENDIAN__
+
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \
wl_cfg80211.o \
@@ -30,5 +32,5 @@ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
bcmsdh.o \
bcmsdh_sdmmc.o \
sdio_chip.o
-
-ccflags-y += -D__CHECK_ENDIAN__
+brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
+ usb.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 4bc8d251acf8..e925290b432b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -15,6 +15,8 @@
*/
/* ****************** SDIO CARD Interface Functions **************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/export.h>
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 9b8c0ed833d4..4688904908ec 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -13,6 +13,9 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/mmc/sdio.h>
@@ -291,13 +294,14 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
struct sk_buff *pkt)
{
int status;
- uint pkt_len = pkt->len;
+ uint pkt_len;
bool fifo = (fix_inc == SDIOH_DATA_FIX);
brcmf_dbg(TRACE, "Enter\n");
if (pkt == NULL)
return -EINVAL;
+ pkt_len = pkt->len;
brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
if (brcmf_pm_resume_error(sdiodev))
@@ -485,7 +489,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
sdiodev->func[0] = func->card->sdio_func[0];
sdiodev->func[1] = func;
sdiodev->bus_if = bus_if;
- bus_if->bus_priv = sdiodev;
+ bus_if->bus_priv.sdio = sdiodev;
bus_if->type = SDIO_BUS;
bus_if->align = BRCMF_SDALIGN;
dev_set_drvdata(&func->card->dev, sdiodev);
@@ -526,7 +530,7 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
if (func->num == 2) {
bus_if = dev_get_drvdata(&func->dev);
- sdiodev = bus_if->bus_priv;
+ sdiodev = bus_if->bus_priv.sdio;
brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
brcmf_sdio_remove(sdiodev);
dev_set_drvdata(&func->card->dev, NULL);
@@ -593,14 +597,14 @@ static struct sdio_driver brcmf_sdmmc_driver = {
#endif /* CONFIG_PM_SLEEP */
};
-static void __exit brcmf_sdio_exit(void)
+void brcmf_sdio_exit(void)
{
brcmf_dbg(TRACE, "Enter\n");
sdio_unregister_driver(&brcmf_sdmmc_driver);
}
-static int __init brcmf_sdio_init(void)
+void brcmf_sdio_init(void)
{
int ret;
@@ -610,9 +614,4 @@ static int __init brcmf_sdio_init(void)
if (ret)
brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
-
- return ret;
}
-
-module_init(brcmf_sdio_init);
-module_exit(brcmf_sdio_exit);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index e58ea40a75b0..07686a748d3c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -644,9 +644,9 @@ extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
uint cmd, void *buf, uint len);
-#ifdef BCMDBG
+#ifdef DEBUG
extern int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size);
-#endif /* BCMDBG */
+#endif /* DEBUG */
extern int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name);
extern int brcmf_c_host_event(struct brcmf_pub *drvr, int *idx,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index ad9be2410b59..366916494be4 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -39,8 +39,11 @@ struct dngl_stats {
/* interface structure between common and bus layer */
struct brcmf_bus {
u8 type; /* bus type */
- void *bus_priv; /* pointer to bus private structure */
- void *drvr; /* pointer to driver pub structure brcmf_pub */
+ union {
+ struct brcmf_sdio_dev *sdio;
+ struct brcmf_usbdev *usb;
+ } bus_priv;
+ struct brcmf_pub *drvr; /* pointer to driver pub structure brcmf_pub */
enum brcmf_bus_state state;
uint maxctl; /* Max size rxctl request from proto to bus */
bool drvr_up; /* Status flag of driver up/down */
@@ -102,4 +105,14 @@ extern int brcmf_bus_start(struct device *dev);
extern int brcmf_add_if(struct device *dev, int ifidx,
char *name, u8 *mac_addr);
+
+#ifdef CONFIG_BRCMFMAC_SDIO
+extern void brcmf_sdio_exit(void);
+extern void brcmf_sdio_init(void);
+#endif
+#ifdef CONFIG_BRCMFMAC_USB
+extern void brcmf_usb_exit(void);
+extern void brcmf_usb_init(void);
+#endif
+
#endif /* _BRCMF_BUS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index ac8d1f437888..b3e3b7f25d82 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -19,6 +19,8 @@
* For certain dcmd codes, the dongle interprets string data from the host.
******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index a51d8f5d36fc..4187435220f3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -13,6 +13,9 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
@@ -38,7 +41,7 @@
#define BRCMF_PKT_FILTER_PATTERN_FIXED_LEN \
offsetof(struct brcmf_pkt_filter_pattern_le, mask_and_pattern)
-#ifdef BCMDBG
+#ifdef DEBUG
static const char brcmf_version[] =
"Dongle Host Driver, version " BRCMF_VERSION_STR "\nCompiled on "
__DATE__ " at " __TIME__;
@@ -133,7 +136,7 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
return p != NULL;
}
-#ifdef BCMDBG
+#ifdef DEBUG
static void
brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
{
@@ -399,10 +402,10 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
p = (char *)&buf[sizeof(struct msgtrace_hdr)];
while ((s = strstr(p, "\n")) != NULL) {
*s = '\0';
- printk(KERN_DEBUG"%s\n", p);
+ pr_debug("%s\n", p);
p = s + 1;
}
- printk(KERN_DEBUG "%s\n", p);
+ pr_debug("%s\n", p);
/* Reset datalen to avoid display below */
datalen = 0;
@@ -430,7 +433,7 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
brcmf_dbg(EVENT, "\n");
}
}
-#endif /* BCMDBG */
+#endif /* DEBUG */
int
brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata,
@@ -518,9 +521,9 @@ brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata,
break;
}
-#ifdef BCMDBG
+#ifdef DEBUG
brcmf_c_show_host_event(event, event_data);
-#endif /* BCMDBG */
+#endif /* DEBUG */
return 0;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index bb26ee36bc68..a2c4576cf9ff 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -32,20 +32,20 @@
#define BRCMF_BTA_VAL 0x1000
#define BRCMF_ISCAN_VAL 0x2000
-#if defined(BCMDBG)
+#if defined(DEBUG)
#define brcmf_dbg(level, fmt, ...) \
do { \
if (BRCMF_ERROR_VAL == BRCMF_##level##_VAL) { \
if (brcmf_msg_level & BRCMF_##level##_VAL) { \
if (net_ratelimit()) \
- printk(KERN_DEBUG "%s: " fmt, \
- __func__, ##__VA_ARGS__); \
+ pr_debug("%s: " fmt, \
+ __func__, ##__VA_ARGS__); \
} \
} else { \
if (brcmf_msg_level & BRCMF_##level##_VAL) { \
- printk(KERN_DEBUG "%s: " fmt, \
- __func__, ##__VA_ARGS__); \
+ pr_debug("%s: " fmt, \
+ __func__, ##__VA_ARGS__); \
} \
} \
} while (0)
@@ -56,7 +56,7 @@ do { \
#define BRCMF_BYTES_ON() (brcmf_msg_level & BRCMF_BYTES_VAL)
#define BRCMF_GLOM_ON() (brcmf_msg_level & BRCMF_GLOM_VAL)
-#else /* (defined BCMDBG) || (defined BCMDBG) */
+#else /* (defined DEBUG) || (defined DEBUG) */
#define brcmf_dbg(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
@@ -66,7 +66,13 @@ do { \
#define BRCMF_BYTES_ON() 0
#define BRCMF_GLOM_ON() 0
-#endif /* defined(BCMDBG) */
+#endif /* defined(DEBUG) */
+
+#define brcmf_dbg_hex_dump(test, data, len, fmt, ...) \
+do { \
+ if (test) \
+ brcmu_dbg_hex_dump(data, len, fmt, ##__VA_ARGS__); \
+} while (0)
extern int brcmf_msg_level;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index eb9eb766ac27..2a1e5ae0c402 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -14,6 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
@@ -590,8 +592,8 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
sprintf(info->bus_info, "%s", dev_name(drvr->dev));
}
-static struct ethtool_ops brcmf_ethtool_ops = {
- .get_drvinfo = brcmf_ethtool_get_drvinfo
+static const struct ethtool_ops brcmf_ethtool_ops = {
+ .get_drvinfo = brcmf_ethtool_get_drvinfo,
};
static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr)
@@ -794,18 +796,19 @@ static int brcmf_netdev_open(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_bus *bus_if = drvr->bus_if;
u32 toe_ol;
s32 ret = 0;
brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
if (ifp->idx == 0) { /* do it only for primary eth0 */
- /* try to bring up bus */
- ret = brcmf_bus_start(drvr->dev);
- if (ret != 0) {
- brcmf_dbg(ERROR, "failed with code %d\n", ret);
- return -1;
+ /* If bus is not ready, can't continue */
+ if (bus_if->state != BRCMF_BUS_DATA) {
+ brcmf_dbg(ERROR, "failed bus is not ready\n");
+ return -EAGAIN;
}
+
atomic_set(&drvr->pend_8021x_cnt, 0);
memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN);
@@ -977,12 +980,6 @@ int brcmf_bus_start(struct device *dev)
return ret;
}
- /* If bus is not ready, can't come up */
- if (bus_if->state != BRCMF_BUS_DATA) {
- brcmf_dbg(ERROR, "failed bus is not ready\n");
- return -ENODEV;
- }
-
brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
iovbuf, sizeof(iovbuf));
brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR, iovbuf,
@@ -1019,6 +1016,8 @@ int brcmf_bus_start(struct device *dev)
if (ret < 0)
return ret;
+ /* signal bus ready */
+ bus_if->state = BRCMF_BUS_DATA;
return 0;
}
@@ -1107,13 +1106,13 @@ void brcmf_detach(struct device *dev)
if (drvr->iflist[i])
brcmf_del_if(drvr, i);
- cancel_work_sync(&drvr->setmacaddr_work);
- cancel_work_sync(&drvr->multicast_work);
-
brcmf_bus_detach(drvr);
- if (drvr->prot)
+ if (drvr->prot) {
+ cancel_work_sync(&drvr->setmacaddr_work);
+ cancel_work_sync(&drvr->multicast_work);
brcmf_proto_detach(drvr);
+ }
bus_if->drvr = NULL;
kfree(drvr);
@@ -1146,7 +1145,7 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
return pend;
}
-#ifdef BCMDBG
+#ifdef DEBUG
int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size)
{
int ret = 0;
@@ -1180,4 +1179,38 @@ exit:
return ret;
}
-#endif /* BCMDBG */
+#endif /* DEBUG */
+
+static void brcmf_driver_init(struct work_struct *work)
+{
+#ifdef CONFIG_BRCMFMAC_SDIO
+ brcmf_sdio_init();
+#endif
+#ifdef CONFIG_BRCMFMAC_USB
+ brcmf_usb_init();
+#endif
+}
+static DECLARE_WORK(brcmf_driver_work, brcmf_driver_init);
+
+static int __init brcmfmac_module_init(void)
+{
+ if (!schedule_work(&brcmf_driver_work))
+ return -EBUSY;
+
+ return 0;
+}
+
+static void __exit brcmfmac_module_exit(void)
+{
+ cancel_work_sync(&brcmf_driver_work);
+
+#ifdef CONFIG_BRCMFMAC_SDIO
+ brcmf_sdio_exit();
+#endif
+#ifdef CONFIG_BRCMFMAC_USB
+ brcmf_usb_exit();
+#endif
+}
+
+module_init(brcmfmac_module_init);
+module_exit(brcmfmac_module_exit);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index f7eeee1dcdb6..2bf5dda29291 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -14,6 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
@@ -40,7 +42,7 @@
#define DCMD_RESP_TIMEOUT 2000 /* In milli second */
-#ifdef BCMDBG
+#ifdef DEBUG
#define BRCMF_TRAP_INFO_SIZE 80
@@ -84,7 +86,7 @@ struct rte_console {
char cbuf[CBUF_LEN];
};
-#endif /* BCMDBG */
+#endif /* DEBUG */
#include <chipcommon.h>
#include "dhd_bus.h"
@@ -307,10 +309,10 @@ struct rte_console {
/* Flags for SDH calls */
#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
-#define BRCMFMAC_FW_NAME "brcm/brcmfmac.bin"
-#define BRCMFMAC_NV_NAME "brcm/brcmfmac.txt"
-MODULE_FIRMWARE(BRCMFMAC_FW_NAME);
-MODULE_FIRMWARE(BRCMFMAC_NV_NAME);
+#define BRCMF_SDIO_FW_NAME "brcm/brcmfmac-sdio.bin"
+#define BRCMF_SDIO_NV_NAME "brcm/brcmfmac-sdio.txt"
+MODULE_FIRMWARE(BRCMF_SDIO_FW_NAME);
+MODULE_FIRMWARE(BRCMF_SDIO_NV_NAME);
#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
@@ -416,7 +418,7 @@ struct sdpcmd_regs {
u16 PAD[0x80];
};
-#ifdef BCMDBG
+#ifdef DEBUG
/* Device console log buffer state */
struct brcmf_console {
uint count; /* Poll interval msec counter */
@@ -426,7 +428,7 @@ struct brcmf_console {
u8 *buf; /* Log buffer (host copy) */
uint last; /* Last buffer read index */
};
-#endif /* BCMDBG */
+#endif /* DEBUG */
struct sdpcm_shared {
u32 flags;
@@ -507,11 +509,11 @@ struct brcmf_sdio {
uint polltick; /* Tick counter */
uint pollcnt; /* Count of active polls */
-#ifdef BCMDBG
+#ifdef DEBUG
uint console_interval;
struct brcmf_console console; /* Console output polling support */
uint console_addr; /* Console address from shared struct */
-#endif /* BCMDBG */
+#endif /* DEBUG */
uint regfails; /* Count of R_REG failures */
@@ -587,10 +589,10 @@ struct brcmf_sdio {
#define CLK_PENDING 2 /* Not used yet */
#define CLK_AVAIL 3
-#ifdef BCMDBG
+#ifdef DEBUG
static int qcount[NUMPRIO];
static int tx_packets[NUMPRIO];
-#endif /* BCMDBG */
+#endif /* DEBUG */
#define SDIO_DRIVE_STRENGTH 6 /* in milliamps */
@@ -764,12 +766,12 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
bus->clkstate = CLK_AVAIL;
brcmf_dbg(INFO, "CLKCTL: turned ON\n");
-#if defined(BCMDBG)
- if (bus->alp_only != true) {
+#if defined(DEBUG)
+ if (!bus->alp_only) {
if (SBSDIO_ALPONLY(clkctl))
brcmf_dbg(ERROR, "HT Clock should be on\n");
}
-#endif /* defined (BCMDBG) */
+#endif /* defined (DEBUG) */
bus->activity = true;
} else {
@@ -814,9 +816,9 @@ static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
/* Transition SD and backplane clock readiness */
static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
{
-#ifdef BCMDBG
+#ifdef DEBUG
uint oldstate = bus->clkstate;
-#endif /* BCMDBG */
+#endif /* DEBUG */
brcmf_dbg(TRACE, "Enter\n");
@@ -861,9 +863,9 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
brcmf_sdbrcm_wd_timer(bus, 0);
break;
}
-#ifdef BCMDBG
+#ifdef DEBUG
brcmf_dbg(INFO, "%d -> %d\n", oldstate, bus->clkstate);
-#endif /* BCMDBG */
+#endif /* DEBUG */
return 0;
}
@@ -1279,13 +1281,10 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
}
return 0;
}
-#ifdef BCMDBG
- if (BRCMF_GLOM_ON()) {
- printk(KERN_DEBUG "SUPERFRAME:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- pfirst->data, min_t(int, pfirst->len, 48));
- }
-#endif
+
+ brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
+ pfirst->data, min_t(int, pfirst->len, 48),
+ "SUPERFRAME:\n");
/* Validate the superframe header */
dptr = (u8 *) (pfirst->data);
@@ -1362,13 +1361,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
check = get_unaligned_le16(dptr + sizeof(u16));
chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
-#ifdef BCMDBG
- if (BRCMF_GLOM_ON()) {
- printk(KERN_DEBUG "subframe:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- dptr, 32);
- }
-#endif
+ brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
+ dptr, 32, "subframe:\n");
if ((u16)~(sublen ^ check)) {
brcmf_dbg(ERROR, "(subframe %d): HW hdr error: len/check 0x%04x/0x%04x\n",
@@ -1433,13 +1427,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
}
rxseq++;
-#ifdef BCMDBG
- if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
- printk(KERN_DEBUG "Rx Subframe Data:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- dptr, dlen);
- }
-#endif
+ brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
+ dptr, dlen, "Rx Subframe Data:\n");
__skb_trim(pfirst, sublen);
skb_pull(pfirst, doff);
@@ -1457,17 +1446,13 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
continue;
}
-#ifdef BCMDBG
- if (BRCMF_GLOM_ON()) {
- brcmf_dbg(GLOM, "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
- bus->glom.qlen, pfirst, pfirst->data,
- pfirst->len, pfirst->next,
- pfirst->prev);
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- pfirst->data,
- min_t(int, pfirst->len, 32));
- }
-#endif /* BCMDBG */
+ brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
+ pfirst->data,
+ min_t(int, pfirst->len, 32),
+ "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
+ bus->glom.qlen, pfirst, pfirst->data,
+ pfirst->len, pfirst->next,
+ pfirst->prev);
}
/* sent any remaining packets up */
if (bus->glom.qlen) {
@@ -1584,12 +1569,8 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
gotpkt:
-#ifdef BCMDBG
- if (BRCMF_BYTES_ON() && BRCMF_CTL_ON()) {
- printk(KERN_DEBUG "RxCtrl:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, bus->rxctl, len);
- }
-#endif
+ brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
+ bus->rxctl, len, "RxCtrl:\n");
/* Point to valid data and indicate its length */
bus->rxctl += doff;
@@ -1818,17 +1799,13 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
}
bus->tx_max = txmax;
-#ifdef BCMDBG
- if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
- printk(KERN_DEBUG "Rx Data:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- rxbuf, len);
- } else if (BRCMF_HDRS_ON()) {
- printk(KERN_DEBUG "RxHdr:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- bus->rxhdr, SDPCM_HDRLEN);
- }
-#endif
+ brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
+ rxbuf, len, "Rx Data:\n");
+ brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
+ BRCMF_DATA_ON()) &&
+ BRCMF_HDRS_ON(),
+ bus->rxhdr, SDPCM_HDRLEN,
+ "RxHdr:\n");
if (chan == SDPCM_CONTROL_CHANNEL) {
brcmf_dbg(ERROR, "(nextlen): readahead on control packet %d?\n",
@@ -1865,13 +1842,9 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
brcmf_sdbrcm_rxfail(bus, true, true);
continue;
}
-#ifdef BCMDBG
- if (BRCMF_BYTES_ON() || BRCMF_HDRS_ON()) {
- printk(KERN_DEBUG "RxHdr:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- bus->rxhdr, SDPCM_HDRLEN);
- }
-#endif
+ brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
+ bus->rxhdr, SDPCM_HDRLEN, "RxHdr:\n");
+
/* Extract hardware header fields */
len = get_unaligned_le16(bus->rxhdr);
@@ -2024,13 +1997,8 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
skb_push(pkt, BRCMF_FIRSTREAD);
memcpy(pkt->data, bus->rxhdr, BRCMF_FIRSTREAD);
-#ifdef BCMDBG
- if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
- printk(KERN_DEBUG "Rx Data:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- pkt->data, len);
- }
-#endif
+ brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
+ pkt->data, len, "Rx Data:\n");
deliver:
/* Save superframe descriptor and allocate packet frame */
@@ -2038,14 +2006,9 @@ deliver:
if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
len);
-#ifdef BCMDBG
- if (BRCMF_GLOM_ON()) {
- printk(KERN_DEBUG "Glom Data:\n");
- print_hex_dump_bytes("",
- DUMP_PREFIX_OFFSET,
- pkt->data, len);
- }
-#endif
+ brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
+ pkt->data, len,
+ "Glom Data:\n");
__skb_trim(pkt, len);
skb_pull(pkt, SDPCM_HDRLEN);
bus->glomd = pkt;
@@ -2078,13 +2041,11 @@ deliver:
down(&bus->sdsem);
}
rxcount = maxframes - rxleft;
-#ifdef BCMDBG
/* Message if we hit the limit */
if (!rxleft)
brcmf_dbg(DATA, "hit rx limit of %d frames\n",
maxframes);
else
-#endif /* BCMDBG */
brcmf_dbg(DATA, "processed %d frames\n", rxcount);
/* Back off rxseq if awaiting rtx, update rx_seq */
if (bus->rxskip)
@@ -2098,8 +2059,7 @@ static void
brcmf_sdbrcm_wait_for_event(struct brcmf_sdio *bus, bool *lockvar)
{
up(&bus->sdsem);
- wait_event_interruptible_timeout(bus->ctrl_wait,
- (*lockvar == false), HZ * 2);
+ wait_event_interruptible_timeout(bus->ctrl_wait, !*lockvar, HZ * 2);
down(&bus->sdsem);
return;
}
@@ -2176,20 +2136,22 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
-#ifdef BCMDBG
+#ifdef DEBUG
tx_packets[pkt->priority]++;
- if (BRCMF_BYTES_ON() &&
- (((BRCMF_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
- (BRCMF_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
- printk(KERN_DEBUG "Tx Frame:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, frame, len);
- } else if (BRCMF_HDRS_ON()) {
- printk(KERN_DEBUG "TxHdr:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- frame, min_t(u16, len, 16));
- }
#endif
+ brcmf_dbg_hex_dump(BRCMF_BYTES_ON() &&
+ ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
+ (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)),
+ frame, len, "Tx Frame:\n");
+ brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
+ ((BRCMF_CTL_ON() &&
+ chan == SDPCM_CONTROL_CHANNEL) ||
+ (BRCMF_DATA_ON() &&
+ chan != SDPCM_CONTROL_CHANNEL))) &&
+ BRCMF_HDRS_ON(),
+ frame, min_t(u16, len, 16), "TxHdr:\n");
+
/* Raise len to next SDIO block to eliminate tail command */
if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
u16 pad = bus->blocksize - (len % bus->blocksize);
@@ -2314,7 +2276,7 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
uint retries;
int err;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -2410,7 +2372,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
int err;
u8 clkctl, devctl = 0;
-#ifdef BCMDBG
+#ifdef DEBUG
/* Check for inconsistent device control */
devctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
SBSDIO_DEVICE_CTL, &err);
@@ -2418,7 +2380,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err);
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
-#endif /* BCMDBG */
+#endif /* DEBUG */
/* Read CSR, if clock on switch to AVAIL, else ignore */
clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
@@ -2664,7 +2626,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
int ret = -EBADE;
uint datalen, prec;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -2684,8 +2646,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
/* Priority based enq */
spin_lock_bh(&bus->txqlock);
- if (brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec) ==
- false) {
+ if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
skb_pull(pkt, SDPCM_HDRLEN);
brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
brcmu_pkt_buf_free_skb(pkt);
@@ -2701,7 +2662,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
brcmf_txflowcontrol(bus->sdiodev->dev, 0, ON);
}
-#ifdef BCMDBG
+#ifdef DEBUG
if (pktq_plen(&bus->txq, prec) > qcount[prec])
qcount[prec] = pktq_plen(&bus->txq, prec);
#endif
@@ -2774,7 +2735,7 @@ xfer_done:
return bcmerror;
}
-#ifdef BCMDBG
+#ifdef DEBUG
#define CONSOLE_LINE_MAX 192
static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
@@ -2845,14 +2806,14 @@ static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
if (line[n - 1] == '\r')
n--;
line[n] = 0;
- printk(KERN_DEBUG "CONSOLE: %s\n", line);
+ pr_debug("CONSOLE: %s\n", line);
}
}
break2:
return 0;
}
-#endif /* BCMDBG */
+#endif /* DEBUG */
static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
{
@@ -2906,7 +2867,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
u8 doff = 0;
int ret = -1;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -2972,7 +2933,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
brcmf_sdbrcm_wait_for_event(bus, &bus->ctrl_frame_stat);
- if (bus->ctrl_frame_stat == false) {
+ if (!bus->ctrl_frame_stat) {
brcmf_dbg(INFO, "ctrl_frame_stat == false\n");
ret = 0;
} else {
@@ -2982,17 +2943,11 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
}
if (ret == -1) {
-#ifdef BCMDBG
- if (BRCMF_BYTES_ON() && BRCMF_CTL_ON()) {
- printk(KERN_DEBUG "Tx Frame:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- frame, len);
- } else if (BRCMF_HDRS_ON()) {
- printk(KERN_DEBUG "TxHdr:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- frame, min_t(u16, len, 16));
- }
-#endif
+ brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
+ frame, len, "Tx Frame:\n");
+ brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
+ BRCMF_HDRS_ON(),
+ frame, min_t(u16, len, 16), "TxHdr:\n");
do {
ret = brcmf_tx_frame(bus, frame, len);
@@ -3021,7 +2976,7 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
uint rxlen = 0;
bool pending;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -3040,7 +2995,7 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
rxlen, msglen);
} else if (timeleft == 0) {
brcmf_dbg(ERROR, "resumed on timeout\n");
- } else if (pending == true) {
+ } else if (pending) {
brcmf_dbg(CTL, "cancelled\n");
return -ERESTARTSYS;
} else {
@@ -3096,9 +3051,9 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
u8 *vbuffer;
u32 varsizew;
__le32 varsizew_le;
-#ifdef BCMDBG
+#ifdef DEBUG
char *nvram_ularray;
-#endif /* BCMDBG */
+#endif /* DEBUG */
/* Even if there are no vars are to be written, we still
need to set the ramsize. */
@@ -3115,7 +3070,7 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
/* Write the vars list */
bcmerror =
brcmf_sdbrcm_membytes(bus, true, varaddr, vbuffer, varsize);
-#ifdef BCMDBG
+#ifdef DEBUG
/* Verify NVRAM bytes */
brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n", varsize);
nvram_ularray = kmalloc(varsize, GFP_ATOMIC);
@@ -3142,7 +3097,7 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
brcmf_dbg(ERROR, "Download/Upload/Compare of NVRAM ok\n");
kfree(nvram_ularray);
-#endif /* BCMDBG */
+#endif /* DEBUG */
kfree(vbuffer);
}
@@ -3245,7 +3200,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
brcmf_dbg(INFO, "Enter\n");
- ret = request_firmware(&bus->firmware, BRCMFMAC_FW_NAME,
+ ret = request_firmware(&bus->firmware, BRCMF_SDIO_FW_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret);
@@ -3342,7 +3297,7 @@ static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
char *bufp;
int ret;
- ret = request_firmware(&bus->firmware, BRCMFMAC_NV_NAME,
+ ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
@@ -3432,7 +3387,7 @@ brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
static int brcmf_sdbrcm_bus_init(struct device *dev)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
unsigned long timeout;
uint retries = 0;
@@ -3507,16 +3462,12 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
SBSDIO_WATERMARK, 8, &err);
-
- /* Set bus state according to enable result */
- bus_if->state = BRCMF_BUS_DATA;
- }
-
- else {
+ } else {
/* Disable F2 again */
enable = SDIO_FUNC_ENABLE_1;
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0,
SDIO_CCCR_IOEx, enable, NULL);
+ ret = -ENODEV;
}
/* Restore previous clock setting */
@@ -3524,7 +3475,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
/* If we didn't come up, turn off backplane clock */
- if (bus_if->state != BRCMF_BUS_DATA)
+ if (!ret)
brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
exit:
@@ -3569,9 +3520,9 @@ void brcmf_sdbrcm_isr(void *arg)
static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
{
-#ifdef BCMDBG
+#ifdef DEBUG
struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
-#endif /* BCMDBG */
+#endif /* DEBUG */
brcmf_dbg(TIMER, "Enter\n");
@@ -3616,7 +3567,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
/* Update interrupt tracking */
bus->lastintrs = bus->intrcount;
}
-#ifdef BCMDBG
+#ifdef DEBUG
/* Poll for console output periodically */
if (bus_if->state == BRCMF_BUS_DATA &&
bus->console_interval != 0) {
@@ -3630,7 +3581,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
bus->console_interval = 0;
}
}
-#endif /* BCMDBG */
+#endif /* DEBUG */
/* On idle timeout clear activity flag and/or turn off clock */
if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
@@ -3721,11 +3672,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
if (brcmf_sdcard_set_sbaddr_window(bus->sdiodev, SI_ENUM_BASE))
brcmf_dbg(ERROR, "FAILED to return to SI_ENUM_BASE\n");
-#ifdef BCMDBG
- printk(KERN_DEBUG "F1 signature read @0x18000000=0x%4x\n",
- brcmf_sdcard_reg_read(bus->sdiodev, SI_ENUM_BASE, 4));
-
-#endif /* BCMDBG */
+ pr_debug("F1 signature read @0x18000000=0x%4x\n",
+ brcmf_sdcard_reg_read(bus->sdiodev, SI_ENUM_BASE, 4));
/*
* Force PLL off until brcmf_sdio_chip_attach()
@@ -3944,8 +3892,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
bus, "brcmf_watchdog");
if (IS_ERR(bus->watchdog_tsk)) {
- printk(KERN_WARNING
- "brcmf_watchdog thread failed to start\n");
+ pr_warn("brcmf_watchdog thread failed to start\n");
bus->watchdog_tsk = NULL;
}
/* Initialize DPC thread */
@@ -3953,8 +3900,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread,
bus, "brcmf_dpc");
if (IS_ERR(bus->dpc_tsk)) {
- printk(KERN_WARNING
- "brcmf_dpc thread failed to start\n");
+ pr_warn("brcmf_dpc thread failed to start\n");
bus->dpc_tsk = NULL;
}
@@ -4031,7 +3977,7 @@ void
brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
{
/* Totally stop the timer */
- if (!wdtick && bus->wd_timer_valid == true) {
+ if (!wdtick && bus->wd_timer_valid) {
del_timer_sync(&bus->timer);
bus->wd_timer_valid = false;
bus->save_ms = wdtick;
@@ -4044,7 +3990,7 @@ brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
if (wdtick) {
if (bus->save_ms != BRCMF_WD_POLL_MS) {
- if (bus->wd_timer_valid == true)
+ if (bus->wd_timer_valid)
/* Stop timer and restart at new value */
del_timer_sync(&bus->timer);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index 11b2d7c97ba2..1534efc21631 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -15,6 +15,8 @@
*/
/* ***** SDIO interface chip backplane handle functions ***** */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/mmc/card.h>
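The hunk above, together with the printk()-to-pr_*() conversions elsewhere in this series, relies on the kernel's pr_fmt() convention: define pr_fmt() before the first include and every pr_debug()/pr_warn()/pr_err() call in the file is prefixed with the module name automatically. A minimal sketch of that pattern, purely for illustration and not part of the patch (demo_init/demo_exit are hypothetical names):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>

static int __init demo_init(void)
{
	pr_debug("loaded\n");		/* printed as "<modname>: loaded" */
	pr_warn("sample warning\n");	/* replaces printk(KERN_WARNING ...) */
	return 0;
}

static void __exit demo_exit(void)
{
	pr_debug("unloaded\n");
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");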
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
new file mode 100644
index 000000000000..82364223e817
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -0,0 +1,1621 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <linux/usb.h>
+#include <net/cfg80211.h>
+
+#include <defs.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+#include "usb_rdl.h"
+#include "usb.h"
+
+#define IOCTL_RESP_TIMEOUT 2000
+
+#define BRCMF_USB_SYNC_TIMEOUT 300 /* ms */
+#define BRCMF_USB_DLIMAGE_SPINWAIT 100 /* in units of ms */
+#define BRCMF_USB_DLIMAGE_LIMIT 500 /* spinwait limit (ms) */
+
+#define BRCMF_POSTBOOT_ID 0xA123 /* ID to detect if dongle
+ has booted up */
+#define BRCMF_USB_RESETCFG_SPINWAIT 1 /* wait after resetcfg (ms) */
+
+#define BRCMF_USB_NRXQ 50
+#define BRCMF_USB_NTXQ 50
+
+#define CONFIGDESC(usb) (&((usb)->actconfig)->desc)
+#define IFPTR(usb, idx) ((usb)->actconfig->interface[(idx)])
+#define IFALTS(usb, idx) (IFPTR((usb), (idx))->altsetting[0])
+#define IFDESC(usb, idx) IFALTS((usb), (idx)).desc
+#define IFEPDESC(usb, idx, ep) (IFALTS((usb), (idx)).endpoint[(ep)]).desc
+
+#define CONTROL_IF 0
+#define BULK_IF 0
+
+#define BRCMF_USB_CBCTL_WRITE 0
+#define BRCMF_USB_CBCTL_READ 1
+#define BRCMF_USB_MAX_PKT_SIZE 1600
+
+#define BRCMF_USB_43236_FW_NAME "brcm/brcmfmac43236b.bin"
+
+enum usbdev_suspend_state {
+ USBOS_SUSPEND_STATE_DEVICE_ACTIVE = 0, /* Device is busy, won't allow
+ suspend */
+ USBOS_SUSPEND_STATE_SUSPEND_PENDING, /* Device is idle, can be
+ * suspended. Waiting for PM to
+ * suspend the device
+ */
+ USBOS_SUSPEND_STATE_SUSPENDED /* Device suspended */
+};
+
+struct brcmf_usb_probe_info {
+ void *usbdev_info;
+ struct usb_device *usb; /* USB device pointer from OS */
+ uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2;
+ int intr_size; /* Size of interrupt message */
+ int interval; /* Interrupt polling interval */
+ int vid;
+ int pid;
+ enum usb_device_speed device_speed;
+ enum usbdev_suspend_state suspend_state;
+ struct usb_interface *intf;
+};
+static struct brcmf_usb_probe_info usbdev_probe_info;
+
+struct brcmf_usb_image {
+ void *data;
+ u32 len;
+};
+static struct brcmf_usb_image g_image = { NULL, 0 };
+
+struct intr_transfer_buf {
+ u32 notification;
+ u32 reserved;
+};
+
+struct brcmf_usbdev_info {
+ struct brcmf_usbdev bus_pub; /* MUST BE FIRST */
+ spinlock_t qlock;
+ struct list_head rx_freeq;
+ struct list_head rx_postq;
+ struct list_head tx_freeq;
+ struct list_head tx_postq;
+ enum usbdev_suspend_state suspend_state;
+ uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2;
+
+ bool activity;
+ int rx_low_watermark;
+ int tx_low_watermark;
+ int tx_high_watermark;
+ bool txoff;
+ bool rxoff;
+ bool txoverride;
+
+ struct brcmf_usbreq *tx_reqs;
+ struct brcmf_usbreq *rx_reqs;
+
+ u8 *image; /* buffer for combined fw and nvram */
+ int image_len;
+
+ wait_queue_head_t wait;
+ bool waitdone;
+ int sync_urb_status;
+
+ struct usb_device *usbdev;
+ struct device *dev;
+ enum usb_device_speed device_speed;
+
+ int ctl_in_pipe, ctl_out_pipe;
+ struct urb *ctl_urb; /* URB for control endpoint */
+ struct usb_ctrlrequest ctl_write;
+ struct usb_ctrlrequest ctl_read;
+ u32 ctl_urb_actual_length;
+ int ctl_urb_status;
+ int ctl_completed;
+ wait_queue_head_t ioctl_resp_wait;
+ wait_queue_head_t ctrl_wait;
+ ulong ctl_op;
+
+ bool rxctl_deferrespok;
+
+ struct urb *bulk_urb; /* used for FW download */
+ struct urb *intr_urb; /* URB for interrupt endpoint */
+ int intr_size; /* Size of interrupt message */
+ int interval; /* Interrupt polling interval */
+ struct intr_transfer_buf intr; /* Data buffer for interrupt endpoint */
+
+ struct brcmf_usb_probe_info probe_info;
+
+};
+
+static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
+ struct brcmf_usbreq *req);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac usb driver.");
+MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN fullmac usb cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct brcmf_usbdev *brcmf_usb_get_buspub(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ return bus_if->bus_priv.usb;
+}
+
+static struct brcmf_usbdev_info *brcmf_usb_get_businfo(struct device *dev)
+{
+ return brcmf_usb_get_buspub(dev)->devinfo;
+}
+
+#if 0
+static void
+brcmf_usb_txflowcontrol(struct brcmf_usbdev_info *devinfo, bool onoff)
+{
+ dhd_txflowcontrol(devinfo->bus_pub.netdev, 0, onoff);
+}
+#endif
+
+static int brcmf_usb_ioctl_resp_wait(struct brcmf_usbdev_info *devinfo,
+ uint *condition, bool *pending)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int timeout = IOCTL_RESP_TIMEOUT;
+
+ /* Convert timeout in milliseconds to jiffies */
+ timeout = msecs_to_jiffies(timeout);
+ /* Wait until control frame is available */
+ add_wait_queue(&devinfo->ioctl_resp_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ smp_mb();
+ while (!(*condition) && (!signal_pending(current) && timeout)) {
+ timeout = schedule_timeout(timeout);
+ /* Wait until control frame is available */
+ smp_mb();
+ }
+
+ if (signal_pending(current))
+ *pending = true;
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&devinfo->ioctl_resp_wait, &wait);
+
+ return timeout;
+}
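brcmf_usb_ioctl_resp_wait() above open-codes a wait queue with its own timeout and signal handling. For reference only, the same behaviour can be expressed with wait_event_interruptible_timeout(), which this driver already uses in brcmf_usb_sync_wait() further down; this is a hedged sketch, not part of the patch, and demo_resp_wait is a hypothetical name:

/* Assumes <linux/wait.h>, <linux/jiffies.h> and IOCTL_RESP_TIMEOUT from the
 * surrounding driver context.
 */
static long demo_resp_wait(wait_queue_head_t *wq, uint *condition,
			   bool *pending)
{
	long timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);

	/* > 0: condition met (remaining jiffies), 0: timed out,
	 * -ERESTARTSYS: interrupted by a signal
	 */
	timeout = wait_event_interruptible_timeout(*wq, *condition, timeout);
	if (timeout == -ERESTARTSYS)
		*pending = true;

	return timeout;
}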
+
+static int brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo)
+{
+ if (waitqueue_active(&devinfo->ioctl_resp_wait))
+ wake_up_interruptible(&devinfo->ioctl_resp_wait);
+
+ return 0;
+}
+
+static void
+brcmf_usb_ctl_complete(struct brcmf_usbdev_info *devinfo, int type, int status)
+{
+
+ if (unlikely(devinfo == NULL))
+ return;
+
+ if (type == BRCMF_USB_CBCTL_READ) {
+ if (status == 0)
+ devinfo->bus_pub.stats.rx_ctlpkts++;
+ else
+ devinfo->bus_pub.stats.rx_ctlerrs++;
+ } else if (type == BRCMF_USB_CBCTL_WRITE) {
+ if (status == 0)
+ devinfo->bus_pub.stats.tx_ctlpkts++;
+ else
+ devinfo->bus_pub.stats.tx_ctlerrs++;
+ }
+
+ devinfo->ctl_urb_status = status;
+ devinfo->ctl_completed = true;
+ brcmf_usb_ioctl_resp_wake(devinfo);
+}
+
+static void
+brcmf_usb_ctlread_complete(struct urb *urb)
+{
+ struct brcmf_usbdev_info *devinfo =
+ (struct brcmf_usbdev_info *)urb->context;
+
+ devinfo->ctl_urb_actual_length = urb->actual_length;
+ brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_READ,
+ urb->status);
+}
+
+static void
+brcmf_usb_ctlwrite_complete(struct urb *urb)
+{
+ struct brcmf_usbdev_info *devinfo =
+ (struct brcmf_usbdev_info *)urb->context;
+
+ brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_WRITE,
+ urb->status);
+}
+
+static int brcmf_usb_pnp(struct brcmf_usbdev_info *devinfo, uint state)
+{
+ return 0;
+}
+
+static int
+brcmf_usb_send_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
+{
+ int ret;
+ u16 size;
+
+ if (devinfo == NULL || buf == NULL ||
+ len == 0 || devinfo->ctl_urb == NULL)
+ return -EINVAL;
+
+ /* If the USB/HSIC bus is in sleep state, wake it up */
+ if (devinfo->suspend_state == USBOS_SUSPEND_STATE_SUSPENDED)
+ if (brcmf_usb_pnp(devinfo, BCMFMAC_USB_PNP_RESUME) != 0) {
+ brcmf_dbg(ERROR, "Could not Resume the bus!\n");
+ return -EIO;
+ }
+
+ devinfo->activity = true;
+ size = len;
+ devinfo->ctl_write.wLength = cpu_to_le16p(&size);
+ devinfo->ctl_urb->transfer_buffer_length = size;
+ devinfo->ctl_urb_status = 0;
+ devinfo->ctl_urb_actual_length = 0;
+
+ usb_fill_control_urb(devinfo->ctl_urb,
+ devinfo->usbdev,
+ devinfo->ctl_out_pipe,
+ (unsigned char *) &devinfo->ctl_write,
+ buf, size,
+ (usb_complete_t)brcmf_usb_ctlwrite_complete,
+ devinfo);
+
+ ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
+ if (ret < 0)
+ brcmf_dbg(ERROR, "usb_submit_urb failed %d\n", ret);
+
+ return ret;
+}
+
+static int
+brcmf_usb_recv_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
+{
+ int ret;
+ u16 size;
+
+ if ((devinfo == NULL) || (buf == NULL) || (len == 0)
+ || (devinfo->ctl_urb == NULL))
+ return -EINVAL;
+
+ size = len;
+ devinfo->ctl_read.wLength = cpu_to_le16p(&size);
+ devinfo->ctl_urb->transfer_buffer_length = size;
+
+ if (devinfo->rxctl_deferrespok) {
+ /* BMAC model */
+ devinfo->ctl_read.bRequestType = USB_DIR_IN
+ | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
+ devinfo->ctl_read.bRequest = DL_DEFER_RESP_OK;
+ } else {
+ /* full dongle model */
+ devinfo->ctl_read.bRequestType = USB_DIR_IN
+ | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ devinfo->ctl_read.bRequest = 1;
+ }
+
+ usb_fill_control_urb(devinfo->ctl_urb,
+ devinfo->usbdev,
+ devinfo->ctl_in_pipe,
+ (unsigned char *) &devinfo->ctl_read,
+ buf, size,
+ (usb_complete_t)brcmf_usb_ctlread_complete,
+ devinfo);
+
+ ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
+ if (ret < 0)
+ brcmf_dbg(ERROR, "usb_submit_urb failed %d\n", ret);
+
+ return ret;
+}
+
+static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
+{
+ int err = 0;
+ int timeout = 0;
+ bool pending;
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+
+ if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
+ /* TODO: handle suspend/resume */
+ return -EIO;
+ }
+
+ if (test_and_set_bit(0, &devinfo->ctl_op))
+ return -EIO;
+
+ err = brcmf_usb_send_ctl(devinfo, buf, len);
+ if (err) {
+ brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len);
+ return err;
+ }
+
+ devinfo->ctl_completed = false;
+ timeout = brcmf_usb_ioctl_resp_wait(devinfo, &devinfo->ctl_completed,
+ &pending);
+ clear_bit(0, &devinfo->ctl_op);
+ if (!timeout) {
+ brcmf_dbg(ERROR, "Txctl wait timed out\n");
+ err = -EIO;
+ }
+ return err;
+}
+
+static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
+{
+ int err = 0;
+ int timeout = 0;
+ bool pending;
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+
+ if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
+ /* TODO: handle suspend/resume */
+ return -EIO;
+ }
+ if (test_and_set_bit(0, &devinfo->ctl_op))
+ return -EIO;
+
+ err = brcmf_usb_recv_ctl(devinfo, buf, len);
+ if (err) {
+ brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len);
+ return err;
+ }
+ devinfo->ctl_completed = false;
+ timeout = brcmf_usb_ioctl_resp_wait(devinfo, &devinfo->ctl_completed,
+ &pending);
+ err = devinfo->ctl_urb_status;
+ clear_bit(0, &devinfo->ctl_op);
+ if (!timeout) {
+ brcmf_dbg(ERROR, "rxctl wait timed out\n");
+ err = -EIO;
+ }
+ if (!err)
+ return devinfo->ctl_urb_actual_length;
+ else
+ return err;
+}
+
+static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo,
+ struct list_head *q)
+{
+ unsigned long flags;
+ struct brcmf_usbreq *req;
+ spin_lock_irqsave(&devinfo->qlock, flags);
+ if (list_empty(q)) {
+ spin_unlock_irqrestore(&devinfo->qlock, flags);
+ return NULL;
+ }
+ req = list_entry(q->next, struct brcmf_usbreq, list);
+ list_del_init(q->next);
+ spin_unlock_irqrestore(&devinfo->qlock, flags);
+ return req;
+
+}
+
+static void brcmf_usb_enq(struct brcmf_usbdev_info *devinfo,
+ struct list_head *q, struct brcmf_usbreq *req)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&devinfo->qlock, flags);
+ list_add_tail(&req->list, q);
+ spin_unlock_irqrestore(&devinfo->qlock, flags);
+}
+
+static struct brcmf_usbreq *
+brcmf_usbdev_qinit(struct list_head *q, int qsize)
+{
+ int i;
+ struct brcmf_usbreq *req, *reqs;
+
+ reqs = kzalloc(sizeof(struct brcmf_usbreq) * qsize, GFP_ATOMIC);
+ if (reqs == NULL) {
+ brcmf_dbg(ERROR, "fail to allocate memory!\n");
+ return NULL;
+ }
+ req = reqs;
+
+ for (i = 0; i < qsize; i++) {
+ req->urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!req->urb)
+ goto fail;
+
+ INIT_LIST_HEAD(&req->list);
+ list_add_tail(&req->list, q);
+ req++;
+ }
+ return reqs;
+fail:
+ brcmf_dbg(ERROR, "fail!\n");
+ while (!list_empty(q)) {
+ req = list_entry(q->next, struct brcmf_usbreq, list);
+ if (req && req->urb)
+ usb_free_urb(req->urb);
+ list_del(q->next);
+ }
+ return NULL;
+
+}
+
+static void brcmf_usb_free_q(struct list_head *q, bool pending)
+{
+ struct brcmf_usbreq *req, *next;
+ int i = 0;
+ list_for_each_entry_safe(req, next, q, list) {
+ if (!req->urb) {
+ brcmf_dbg(ERROR, "bad req\n");
+ break;
+ }
+ i++;
+ if (pending) {
+ usb_kill_urb(req->urb);
+ } else {
+ usb_free_urb(req->urb);
+ list_del_init(&req->list);
+ }
+ }
+}
+
+static void brcmf_usb_del_fromq(struct brcmf_usbdev_info *devinfo,
+ struct brcmf_usbreq *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&devinfo->qlock, flags);
+ list_del_init(&req->list);
+ spin_unlock_irqrestore(&devinfo->qlock, flags);
+}
+
+
+static void brcmf_usb_tx_complete(struct urb *urb)
+{
+ struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
+ struct brcmf_usbdev_info *devinfo = req->devinfo;
+
+ brcmf_usb_del_fromq(devinfo, req);
+ if (urb->status == 0)
+ devinfo->bus_pub.bus->dstats.tx_packets++;
+ else
+ devinfo->bus_pub.bus->dstats.tx_errors++;
+
+ dev_kfree_skb(req->skb);
+ req->skb = NULL;
+ brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req);
+
+}
+
+static void brcmf_usb_rx_complete(struct urb *urb)
+{
+ struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
+ struct brcmf_usbdev_info *devinfo = req->devinfo;
+ struct sk_buff *skb;
+ int ifidx = 0;
+
+ brcmf_usb_del_fromq(devinfo, req);
+ skb = req->skb;
+ req->skb = NULL;
+
+ if (urb->status == 0) {
+ devinfo->bus_pub.bus->dstats.rx_packets++;
+ } else {
+ devinfo->bus_pub.bus->dstats.rx_errors++;
+ dev_kfree_skb(skb);
+ brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
+ return;
+ }
+
+ if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_UP) {
+ skb_put(skb, urb->actual_length);
+ if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) {
+ brcmf_dbg(ERROR, "rx protocol error\n");
+ brcmu_pkt_buf_free_skb(skb);
+ devinfo->bus_pub.bus->dstats.rx_errors++;
+ } else {
+ brcmf_rx_packet(devinfo->dev, ifidx, skb);
+ brcmf_usb_rx_refill(devinfo, req);
+ }
+ } else {
+ dev_kfree_skb(skb);
+ }
+ return;
+
+}
+
+static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
+ struct brcmf_usbreq *req)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ if (!req || !devinfo)
+ return;
+
+ skb = dev_alloc_skb(devinfo->bus_pub.bus_mtu);
+ if (!skb) {
+ brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
+ return;
+ }
+ req->skb = skb;
+
+ usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->rx_pipe,
+ skb->data, skb_tailroom(skb), brcmf_usb_rx_complete,
+ req);
+ req->urb->transfer_flags |= URB_ZERO_PACKET;
+ req->devinfo = devinfo;
+
+ ret = usb_submit_urb(req->urb, GFP_ATOMIC);
+ if (ret == 0) {
+ brcmf_usb_enq(devinfo, &devinfo->rx_postq, req);
+ } else {
+ dev_kfree_skb(req->skb);
+ req->skb = NULL;
+ brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
+ }
+ return;
+}
+
+static void brcmf_usb_rx_fill_all(struct brcmf_usbdev_info *devinfo)
+{
+ struct brcmf_usbreq *req;
+
+ if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
+ brcmf_dbg(ERROR, "bus is not up\n");
+ return;
+ }
+ while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq)) != NULL)
+ brcmf_usb_rx_refill(devinfo, req);
+}
+
+static void
+brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
+{
+ struct brcmf_bus *bcmf_bus = devinfo->bus_pub.bus;
+ int old_state;
+
+
+ if (devinfo->bus_pub.state == state)
+ return;
+
+ old_state = devinfo->bus_pub.state;
+ brcmf_dbg(TRACE, "dbus state change from %d to to %d\n",
+ old_state, state);
+
+ /* Don't update state if it's PnP firmware re-download */
+ if (state != BCMFMAC_USB_STATE_PNP_FWDL) /* TODO */
+ devinfo->bus_pub.state = state;
+
+ if ((old_state == BCMFMAC_USB_STATE_SLEEP)
+ && (state == BCMFMAC_USB_STATE_UP)) {
+ brcmf_usb_rx_fill_all(devinfo);
+ }
+
+ /* update state of upper layer */
+ if (state == BCMFMAC_USB_STATE_DOWN) {
+ brcmf_dbg(INFO, "DBUS is down\n");
+ bcmf_bus->state = BRCMF_BUS_DOWN;
+ } else {
+ brcmf_dbg(INFO, "DBUS current state=%d\n", state);
+ }
+}
+
+static void
+brcmf_usb_intr_complete(struct urb *urb)
+{
+ struct brcmf_usbdev_info *devinfo =
+ (struct brcmf_usbdev_info *)urb->context;
+ bool killed;
+
+ if (devinfo == NULL)
+ return;
+
+ if (unlikely(urb->status)) {
+ if (devinfo->suspend_state ==
+ USBOS_SUSPEND_STATE_SUSPEND_PENDING)
+ killed = true;
+
+ if ((urb->status == -ENOENT && (!killed))
+ || urb->status == -ESHUTDOWN ||
+ urb->status == -ENODEV) {
+ brcmf_usb_state_change(devinfo, BCMFMAC_USB_STATE_DOWN);
+ }
+ }
+
+ if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_DOWN) {
+ brcmf_dbg(ERROR, "intr cb when DBUS down, ignoring\n");
+ return;
+ }
+
+ if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_UP)
+ usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
+}
+
+static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
+{
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+ struct brcmf_usbreq *req;
+ int ret;
+
+ if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
+ /* TODO: handle suspend/resume */
+ return -EIO;
+ }
+
+ req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq);
+ if (!req) {
+ brcmf_dbg(ERROR, "no req to send\n");
+ return -ENOMEM;
+ }
+ if (!req->urb) {
+ brcmf_dbg(ERROR, "no urb for req %p\n", req);
+ return -ENOBUFS;
+ }
+
+ req->skb = skb;
+ req->devinfo = devinfo;
+ usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe,
+ skb->data, skb->len, brcmf_usb_tx_complete, req);
+ req->urb->transfer_flags |= URB_ZERO_PACKET;
+ ret = usb_submit_urb(req->urb, GFP_ATOMIC);
+ if (!ret) {
+ brcmf_usb_enq(devinfo, &devinfo->tx_postq, req);
+ } else {
+ req->skb = NULL;
+ brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req);
+ }
+
+ return ret;
+}
+
+
+static int brcmf_usb_up(struct device *dev)
+{
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+ u16 ifnum;
+
+ if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_UP)
+ return 0;
+
+ /* If the USB/HSIC bus is in sleep state, wake it up */
+ if (devinfo->suspend_state == USBOS_SUSPEND_STATE_SUSPENDED) {
+ if (brcmf_usb_pnp(devinfo, BCMFMAC_USB_PNP_RESUME) != 0) {
+ brcmf_dbg(ERROR, "Could not Resume the bus!\n");
+ return -EIO;
+ }
+ }
+ devinfo->activity = true;
+
+ /* Success, indicate devinfo is fully up */
+ brcmf_usb_state_change(devinfo, BCMFMAC_USB_STATE_UP);
+
+ if (devinfo->intr_urb) {
+ int ret;
+
+ usb_fill_int_urb(devinfo->intr_urb, devinfo->usbdev,
+ devinfo->intr_pipe,
+ &devinfo->intr,
+ devinfo->intr_size,
+ (usb_complete_t)brcmf_usb_intr_complete,
+ devinfo,
+ devinfo->interval);
+
+ ret = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
+ if (ret) {
+ brcmf_dbg(ERROR, "USB_SUBMIT_URB failed with status %d\n",
+ ret);
+ return -EINVAL;
+ }
+ }
+
+ if (devinfo->ctl_urb) {
+ devinfo->ctl_in_pipe = usb_rcvctrlpipe(devinfo->usbdev, 0);
+ devinfo->ctl_out_pipe = usb_sndctrlpipe(devinfo->usbdev, 0);
+
+ ifnum = IFDESC(devinfo->usbdev, CONTROL_IF).bInterfaceNumber;
+
+ /* CTL Write */
+ devinfo->ctl_write.bRequestType =
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ devinfo->ctl_write.bRequest = 0;
+ devinfo->ctl_write.wValue = cpu_to_le16(0);
+ devinfo->ctl_write.wIndex = cpu_to_le16p(&ifnum);
+
+ /* CTL Read */
+ devinfo->ctl_read.bRequestType =
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ devinfo->ctl_read.bRequest = 1;
+ devinfo->ctl_read.wValue = cpu_to_le16(0);
+ devinfo->ctl_read.wIndex = cpu_to_le16p(&ifnum);
+ }
+ brcmf_usb_rx_fill_all(devinfo);
+ return 0;
+}
+
+static void brcmf_usb_down(struct device *dev)
+{
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+
+ if (devinfo == NULL)
+ return;
+
+ brcmf_dbg(TRACE, "enter\n");
+ if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_DOWN)
+ return;
+
+ brcmf_usb_state_change(devinfo, BCMFMAC_USB_STATE_DOWN);
+ if (devinfo->intr_urb)
+ usb_kill_urb(devinfo->intr_urb);
+
+ if (devinfo->ctl_urb)
+ usb_kill_urb(devinfo->ctl_urb);
+
+ if (devinfo->bulk_urb)
+ usb_kill_urb(devinfo->bulk_urb);
+ brcmf_usb_free_q(&devinfo->tx_postq, true);
+
+ brcmf_usb_free_q(&devinfo->rx_postq, true);
+}
+
+static int
+brcmf_usb_sync_wait(struct brcmf_usbdev_info *devinfo, u16 time)
+{
+ int ret;
+ int err = 0;
+ int ms = time;
+
+ ret = wait_event_interruptible_timeout(devinfo->wait,
+ devinfo->waitdone == true, (ms * HZ / 1000));
+
+ if ((devinfo->waitdone == false) || (devinfo->sync_urb_status)) {
+ brcmf_dbg(ERROR, "timeout(%d) or urb err=%d\n",
+ ret, devinfo->sync_urb_status);
+ err = -EINVAL;
+ }
+ devinfo->waitdone = false;
+ return err;
+}
+
+static void
+brcmf_usb_sync_complete(struct urb *urb)
+{
+ struct brcmf_usbdev_info *devinfo =
+ (struct brcmf_usbdev_info *)urb->context;
+
+ devinfo->waitdone = true;
+ wake_up_interruptible(&devinfo->wait);
+ devinfo->sync_urb_status = urb->status;
+}
+
+static bool brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
+ void *buffer, int buflen)
+{
+ int ret = 0;
+ char *tmpbuf;
+ u16 size;
+
+ if ((!devinfo) || (devinfo->ctl_urb == NULL))
+ return false;
+
+ tmpbuf = kmalloc(buflen, GFP_ATOMIC);
+ if (!tmpbuf)
+ return false;
+
+ size = buflen;
+ devinfo->ctl_urb->transfer_buffer_length = size;
+
+ devinfo->ctl_read.wLength = cpu_to_le16p(&size);
+ devinfo->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE;
+ devinfo->ctl_read.bRequest = cmd;
+
+ usb_fill_control_urb(devinfo->ctl_urb,
+ devinfo->usbdev,
+ usb_rcvctrlpipe(devinfo->usbdev, 0),
+ (unsigned char *) &devinfo->ctl_read,
+ (void *) tmpbuf, size,
+ (usb_complete_t)brcmf_usb_sync_complete, devinfo);
+
+ ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
+ if (ret < 0) {
+ brcmf_dbg(ERROR, "usb_submit_urb failed %d\n", ret);
+ kfree(tmpbuf);
+ return false;
+ }
+
+ ret = brcmf_usb_sync_wait(devinfo, BRCMF_USB_SYNC_TIMEOUT);
+ memcpy(buffer, tmpbuf, buflen);
+ kfree(tmpbuf);
+
+ return (ret == 0);
+}
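brcmf_usb_dl_cmd() above sends a vendor-specific IN control request (bRequest set to DL_GETVER, DL_START and so on) to the boot loader and waits for its completion. Purely as an illustration of the wire format, the equivalent request issued from userspace with libusb-1.0 would look roughly like this; it is not part of the patch, dl_cmd_userspace is a hypothetical name, and the zero wValue/wIndex are assumptions:

#include <libusb-1.0/libusb.h>

/* bmRequestType 0xC1 = IN | vendor | interface, matching the driver above */
static int dl_cmd_userspace(libusb_device_handle *h, uint8_t cmd,
			    unsigned char *buf, uint16_t len)
{
	return libusb_control_transfer(h,
				       LIBUSB_ENDPOINT_IN |
				       LIBUSB_REQUEST_TYPE_VENDOR |
				       LIBUSB_RECIPIENT_INTERFACE,
				       cmd,	/* bRequest: DL_GETVER, ... */
				       0,	/* wValue */
				       0,	/* wIndex */
				       buf, len,
				       300	/* timeout in ms */);
}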
+
+static bool
+brcmf_usb_dlneeded(struct brcmf_usbdev_info *devinfo)
+{
+ struct bootrom_id_le id;
+ u32 chipid, chiprev;
+
+ brcmf_dbg(TRACE, "enter\n");
+
+ if (devinfo == NULL)
+ return false;
+
+ /* Check if firmware downloaded already by querying runtime ID */
+ id.chip = cpu_to_le32(0xDEAD);
+ brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id,
+ sizeof(struct bootrom_id_le));
+
+ chipid = le32_to_cpu(id.chip);
+ chiprev = le32_to_cpu(id.chiprev);
+
+ if ((chipid & 0x4300) == 0x4300)
+ brcmf_dbg(INFO, "chip %x rev 0x%x\n", chipid, chiprev);
+ else
+ brcmf_dbg(INFO, "chip %d rev 0x%x\n", chipid, chiprev);
+ if (chipid == BRCMF_POSTBOOT_ID) {
+ brcmf_dbg(INFO, "firmware already downloaded\n");
+ brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id,
+ sizeof(struct bootrom_id_le));
+ return false;
+ } else {
+ devinfo->bus_pub.devid = chipid;
+ devinfo->bus_pub.chiprev = chiprev;
+ }
+ return true;
+}
+
+static int
+brcmf_usb_resetcfg(struct brcmf_usbdev_info *devinfo)
+{
+ struct bootrom_id_le id;
+ u16 wait = 0, wait_time;
+
+ brcmf_dbg(TRACE, "enter\n");
+
+ if (devinfo == NULL)
+ return -EINVAL;
+
+ /* Give dongle chance to boot */
+ wait_time = BRCMF_USB_DLIMAGE_SPINWAIT;
+ while (wait < BRCMF_USB_DLIMAGE_LIMIT) {
+ mdelay(wait_time);
+ wait += wait_time;
+ id.chip = cpu_to_le32(0xDEAD); /* Get the ID */
+ brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id,
+ sizeof(struct bootrom_id_le));
+ if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID))
+ break;
+ }
+
+ if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID)) {
+ brcmf_dbg(INFO, "download done %d ms postboot chip 0x%x/rev 0x%x\n",
+ wait, le32_to_cpu(id.chip), le32_to_cpu(id.chiprev));
+
+ brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id,
+ sizeof(struct bootrom_id_le));
+
+ /* XXX this wait may not be necessary */
+ mdelay(BRCMF_USB_RESETCFG_SPINWAIT);
+ return 0;
+ } else {
+ brcmf_dbg(ERROR, "Cannot talk to Dongle. Firmware is not UP, %d ms\n",
+ wait);
+ return -EINVAL;
+ }
+}
+
+
+static int
+brcmf_usb_dl_send_bulk(struct brcmf_usbdev_info *devinfo, void *buffer, int len)
+{
+ int ret;
+
+ if ((devinfo == NULL) || (devinfo->bulk_urb == NULL))
+ return -EINVAL;
+
+ /* Prepare the URB */
+ usb_fill_bulk_urb(devinfo->bulk_urb, devinfo->usbdev,
+ devinfo->tx_pipe, buffer, len,
+ (usb_complete_t)brcmf_usb_sync_complete, devinfo);
+
+ devinfo->bulk_urb->transfer_flags |= URB_ZERO_PACKET;
+
+ ret = usb_submit_urb(devinfo->bulk_urb, GFP_ATOMIC);
+ if (ret) {
+ brcmf_dbg(ERROR, "usb_submit_urb failed %d\n", ret);
+ return ret;
+ }
+ ret = brcmf_usb_sync_wait(devinfo, BRCMF_USB_SYNC_TIMEOUT);
+ return ret;
+}
+
+static int
+brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
+{
+ unsigned int sendlen, sent, dllen;
+ char *bulkchunk = NULL, *dlpos;
+ struct rdl_state_le state;
+ u32 rdlstate, rdlbytes;
+ int err = 0;
+ brcmf_dbg(TRACE, "fw %p, len %d\n", fw, fwlen);
+
+ bulkchunk = kmalloc(RDL_CHUNK, GFP_ATOMIC);
+ if (bulkchunk == NULL) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ /* 1) Prepare USB boot loader for runtime image */
+ brcmf_usb_dl_cmd(devinfo, DL_START, &state,
+ sizeof(struct rdl_state_le));
+
+ rdlstate = le32_to_cpu(state.state);
+ rdlbytes = le32_to_cpu(state.bytes);
+
+ /* 2) Check we are in the Waiting state */
+ if (rdlstate != DL_WAITING) {
+ brcmf_dbg(ERROR, "Failed to DL_START\n");
+ err = -EINVAL;
+ goto fail;
+ }
+ sent = 0;
+ dlpos = fw;
+ dllen = fwlen;
+
+ /* Get chip id and rev */
+ while (rdlbytes != dllen) {
+ /* Wait until the usb device reports it received all
+ * the bytes we sent */
+ if ((rdlbytes == sent) && (rdlbytes != dllen)) {
+ if ((dllen-sent) < RDL_CHUNK)
+ sendlen = dllen-sent;
+ else
+ sendlen = RDL_CHUNK;
+
+ /* simply avoid having to send a ZLP by ensuring we
+ * never have an even
+ * multiple of 64
+ */
+ if (!(sendlen % 64))
+ sendlen -= 4;
+
+ /* send data */
+ memcpy(bulkchunk, dlpos, sendlen);
+ if (brcmf_usb_dl_send_bulk(devinfo, bulkchunk,
+ sendlen)) {
+ brcmf_dbg(ERROR, "send_bulk failed\n");
+ err = -EINVAL;
+ goto fail;
+ }
+
+ dlpos += sendlen;
+ sent += sendlen;
+ }
+ if (!brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state,
+ sizeof(struct rdl_state_le))) {
+ brcmf_dbg(ERROR, "DL_GETSTATE Failed xxxx\n");
+ err = -EINVAL;
+ goto fail;
+ }
+
+ rdlstate = le32_to_cpu(state.state);
+ rdlbytes = le32_to_cpu(state.bytes);
+
+ /* restart if an error is reported */
+ if (rdlstate == DL_BAD_HDR || rdlstate == DL_BAD_CRC) {
+ brcmf_dbg(ERROR, "Bad Hdr or Bad CRC state %d\n",
+ rdlstate);
+ err = -EINVAL;
+ goto fail;
+ }
+ }
+
+fail:
+ kfree(bulkchunk);
+ brcmf_dbg(TRACE, "err=%d\n", err);
+ return err;
+}
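The download loop above pushes the image out in RDL_CHUNK-sized pieces and trims 4 bytes from any chunk whose length is a multiple of 64, so a bulk transfer never ends exactly on a packet boundary and no zero-length packet (ZLP) is needed. The sizing rule in isolation, as a plain C helper (illustrative only; dl_chunk_len is a hypothetical name, RDL_CHUNK matches usb_rdl.h below):

#include <stddef.h>

#define RDL_CHUNK 1500	/* size of each dl transfer, as in usb_rdl.h */

static size_t dl_chunk_len(size_t total, size_t sent)
{
	size_t len = total - sent;

	if (len > RDL_CHUNK)
		len = RDL_CHUNK;
	if (len && !(len % 64))
		len -= 4;	/* never a 64-byte multiple, so no ZLP is required */

	return len;
}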
+
+static int brcmf_usb_dlstart(struct brcmf_usbdev_info *devinfo, u8 *fw, int len)
+{
+ int err;
+
+ brcmf_dbg(TRACE, "enter\n");
+
+ if (devinfo == NULL)
+ return -EINVAL;
+
+ if (devinfo->bus_pub.devid == 0xDEAD)
+ return -EINVAL;
+
+ err = brcmf_usb_dl_writeimage(devinfo, fw, len);
+ if (err == 0)
+ devinfo->bus_pub.state = BCMFMAC_USB_STATE_DL_DONE;
+ else
+ devinfo->bus_pub.state = BCMFMAC_USB_STATE_DL_PENDING;
+ brcmf_dbg(TRACE, "exit: err=%d\n", err);
+
+ return err;
+}
+
+static int brcmf_usb_dlrun(struct brcmf_usbdev_info *devinfo)
+{
+ struct rdl_state_le state;
+
+ brcmf_dbg(TRACE, "enter\n");
+ if (!devinfo)
+ return -EINVAL;
+
+ if (devinfo->bus_pub.devid == 0xDEAD)
+ return -EINVAL;
+
+ /* Check we are runnable */
+ brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state,
+ sizeof(struct rdl_state_le));
+
+ /* Start the image */
+ if (state.state == cpu_to_le32(DL_RUNNABLE)) {
+ if (!brcmf_usb_dl_cmd(devinfo, DL_GO, &state,
+ sizeof(struct rdl_state_le)))
+ return -ENODEV;
+ if (brcmf_usb_resetcfg(devinfo))
+ return -ENODEV;
+ /* The Dongle may go for re-enumeration. */
+ } else {
+ brcmf_dbg(ERROR, "Dongle not runnable\n");
+ return -EINVAL;
+ }
+ brcmf_dbg(TRACE, "exit\n");
+ return 0;
+}
+
+static bool brcmf_usb_chip_support(int chipid, int chiprev)
+{
+ switch(chipid) {
+ case 43235:
+ case 43236:
+ case 43238:
+ return (chiprev == 3);
+ default:
+ break;
+ }
+ return false;
+}
+
+static int
+brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
+{
+ int devid, chiprev;
+ int err;
+
+ brcmf_dbg(TRACE, "enter\n");
+ if (devinfo == NULL)
+ return -ENODEV;
+
+ devid = devinfo->bus_pub.devid;
+ chiprev = devinfo->bus_pub.chiprev;
+
+ if (!brcmf_usb_chip_support(devid, chiprev)) {
+ brcmf_dbg(ERROR, "unsupported chip %d rev %d\n",
+ devid, chiprev);
+ return -EINVAL;
+ }
+
+ if (!devinfo->image) {
+ brcmf_dbg(ERROR, "No firmware!\n");
+ return -ENOENT;
+ }
+
+ err = brcmf_usb_dlstart(devinfo,
+ devinfo->image, devinfo->image_len);
+ if (err == 0)
+ err = brcmf_usb_dlrun(devinfo);
+ return err;
+}
+
+
+static void brcmf_usb_detach(const struct brcmf_usbdev *bus_pub)
+{
+ struct brcmf_usbdev_info *devinfo =
+ (struct brcmf_usbdev_info *)bus_pub;
+
+ brcmf_dbg(TRACE, "devinfo %p\n", devinfo);
+
+ /* store the image globally */
+ g_image.data = devinfo->image;
+ g_image.len = devinfo->image_len;
+
+ /* free the URBS */
+ brcmf_usb_free_q(&devinfo->rx_freeq, false);
+ brcmf_usb_free_q(&devinfo->tx_freeq, false);
+
+ usb_free_urb(devinfo->intr_urb);
+ usb_free_urb(devinfo->ctl_urb);
+ usb_free_urb(devinfo->bulk_urb);
+
+ kfree(devinfo->tx_reqs);
+ kfree(devinfo->rx_reqs);
+ kfree(devinfo);
+}
+
+#define TRX_MAGIC 0x30524448 /* "HDR0" */
+#define TRX_VERSION 1 /* Version 1 */
+#define TRX_MAX_LEN 0x3B0000 /* Max length */
+#define TRX_NO_HEADER 1 /* Do not write TRX header */
+#define TRX_MAX_OFFSET 3 /* Max number of individual files */
+#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed image */
+
+struct trx_header_le {
+ __le32 magic; /* "HDR0" */
+ __le32 len; /* Length of file including header */
+ __le32 crc32; /* CRC from flag_version to end of file */
+ __le32 flag_version; /* 0:15 flags, 16:31 version */
+ __le32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of
+ * header */
+};
+
+static int check_file(const u8 *headers)
+{
+ struct trx_header_le *trx;
+ int actual_len = -1;
+
+ /* Extract trx header */
+ trx = (struct trx_header_le *) headers;
+ if (trx->magic != cpu_to_le32(TRX_MAGIC))
+ return -1;
+
+ headers += sizeof(struct trx_header_le);
+
+ if (le32_to_cpu(trx->flag_version) & TRX_UNCOMP_IMAGE) {
+ actual_len = le32_to_cpu(trx->offsets[TRX_OFFSETS_DLFWLEN_IDX]);
+ return actual_len + sizeof(struct trx_header_le);
+ }
+ return -1;
+}
+
+static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
+{
+ s8 *fwname;
+ const struct firmware *fw;
+ int err;
+
+ devinfo->image = g_image.data;
+ devinfo->image_len = g_image.len;
+
+ /*
+ * if we have an image we can leave here.
+ */
+ if (devinfo->image)
+ return 0;
+
+ fwname = BRCMF_USB_43236_FW_NAME;
+
+ err = request_firmware(&fw, fwname, devinfo->dev);
+ if (!fw) {
+ brcmf_dbg(ERROR, "fail to request firmware %s\n", fwname);
+ return err;
+ }
+ if (check_file(fw->data) < 0) {
+ brcmf_dbg(ERROR, "invalid firmware %s\n", fwname);
+ return -EINVAL;
+ }
+
+ devinfo->image = kmalloc(fw->size, GFP_ATOMIC); /* plus nvram */
+ if (!devinfo->image)
+ return -ENOMEM;
+
+ memcpy(devinfo->image, fw->data, fw->size);
+ devinfo->image_len = fw->size;
+
+ release_firmware(fw);
+ return 0;
+}
+
+
+static
+struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
+{
+ struct brcmf_usbdev_info *devinfo;
+
+ devinfo = kzalloc(sizeof(struct brcmf_usbdev_info), GFP_ATOMIC);
+ if (devinfo == NULL)
+ return NULL;
+
+ devinfo->bus_pub.nrxq = nrxq;
+ devinfo->rx_low_watermark = nrxq / 2;
+ devinfo->bus_pub.devinfo = devinfo;
+ devinfo->bus_pub.ntxq = ntxq;
+
+ /* flow control when too many tx urbs posted */
+ devinfo->tx_low_watermark = ntxq / 4;
+ devinfo->tx_high_watermark = devinfo->tx_low_watermark * 3;
+ devinfo->dev = dev;
+ devinfo->usbdev = usbdev_probe_info.usb;
+ devinfo->tx_pipe = usbdev_probe_info.tx_pipe;
+ devinfo->rx_pipe = usbdev_probe_info.rx_pipe;
+ devinfo->rx_pipe2 = usbdev_probe_info.rx_pipe2;
+ devinfo->intr_pipe = usbdev_probe_info.intr_pipe;
+
+ devinfo->interval = usbdev_probe_info.interval;
+ devinfo->intr_size = usbdev_probe_info.intr_size;
+
+ memcpy(&devinfo->probe_info, &usbdev_probe_info,
+ sizeof(struct brcmf_usb_probe_info));
+ devinfo->bus_pub.bus_mtu = BRCMF_USB_MAX_PKT_SIZE;
+
+ /* Initialize other structure content */
+ init_waitqueue_head(&devinfo->ioctl_resp_wait);
+
+ /* Initialize the spinlocks */
+ spin_lock_init(&devinfo->qlock);
+
+ INIT_LIST_HEAD(&devinfo->rx_freeq);
+ INIT_LIST_HEAD(&devinfo->rx_postq);
+
+ INIT_LIST_HEAD(&devinfo->tx_freeq);
+ INIT_LIST_HEAD(&devinfo->tx_postq);
+
+ devinfo->rx_reqs = brcmf_usbdev_qinit(&devinfo->rx_freeq, nrxq);
+ if (!devinfo->rx_reqs)
+ goto error;
+
+ devinfo->tx_reqs = brcmf_usbdev_qinit(&devinfo->tx_freeq, ntxq);
+ if (!devinfo->tx_reqs)
+ goto error;
+
+ devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!devinfo->intr_urb) {
+ brcmf_dbg(ERROR, "usb_alloc_urb (intr) failed\n");
+ goto error;
+ }
+ devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!devinfo->ctl_urb) {
+ brcmf_dbg(ERROR, "usb_alloc_urb (ctl) failed\n");
+ goto error;
+ }
+ devinfo->rxctl_deferrespok = 0;
+
+ devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!devinfo->bulk_urb) {
+ brcmf_dbg(ERROR, "usb_alloc_urb (bulk) failed\n");
+ goto error;
+ }
+
+ init_waitqueue_head(&devinfo->wait);
+ if (!brcmf_usb_dlneeded(devinfo))
+ return &devinfo->bus_pub;
+
+ brcmf_dbg(TRACE, "start fw downloading\n");
+ if (brcmf_usb_get_fw(devinfo))
+ goto error;
+
+ if (brcmf_usb_fw_download(devinfo))
+ goto error;
+
+ return &devinfo->bus_pub;
+
+error:
+ brcmf_dbg(ERROR, "failed!\n");
+ brcmf_usb_detach(&devinfo->bus_pub);
+ return NULL;
+}
+
+static int brcmf_usb_probe_cb(struct device *dev, const char *desc,
+ u32 bustype, u32 hdrlen)
+{
+ struct brcmf_bus *bus = NULL;
+ struct brcmf_usbdev *bus_pub = NULL;
+ int ret;
+
+
+ bus_pub = brcmf_usb_attach(BRCMF_USB_NRXQ, BRCMF_USB_NTXQ, dev);
+ if (!bus_pub) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
+ if (!bus) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ bus_pub->bus = bus;
+ bus->brcmf_bus_txdata = brcmf_usb_tx;
+ bus->brcmf_bus_init = brcmf_usb_up;
+ bus->brcmf_bus_stop = brcmf_usb_down;
+ bus->brcmf_bus_txctl = brcmf_usb_tx_ctlpkt;
+ bus->brcmf_bus_rxctl = brcmf_usb_rx_ctlpkt;
+ bus->type = bustype;
+ bus->bus_priv.usb = bus_pub;
+ dev_set_drvdata(dev, bus);
+
+ /* Attach to the common driver interface */
+ ret = brcmf_attach(hdrlen, dev);
+ if (ret) {
+ brcmf_dbg(ERROR, "dhd_attach failed\n");
+ goto fail;
+ }
+
+ ret = brcmf_bus_start(dev);
+ if (ret == -ENOLINK) {
+ brcmf_dbg(ERROR, "dongle is not responding\n");
+ brcmf_detach(dev);
+ goto fail;
+ }
+
+ /* add interface and open for business */
+ ret = brcmf_add_if(dev, 0, "wlan%d", NULL);
+ if (ret) {
+ brcmf_dbg(ERROR, "Add primary net device interface failed!!\n");
+ brcmf_detach(dev);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ /* Release resources in reverse order */
+ if (bus_pub)
+ brcmf_usb_detach(bus_pub);
+ kfree(bus);
+ return ret;
+}
+
+static void
+brcmf_usb_disconnect_cb(struct brcmf_usbdev *bus_pub)
+{
+ if (!bus_pub)
+ return;
+ brcmf_dbg(TRACE, "enter: bus_pub %p\n", bus_pub);
+
+ brcmf_detach(bus_pub->devinfo->dev);
+ kfree(bus_pub->bus);
+ brcmf_usb_detach(bus_pub);
+
+}
+
+static int
+brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+ int ep;
+ struct usb_endpoint_descriptor *endpoint;
+ int ret = 0;
+ struct usb_device *usb = interface_to_usbdev(intf);
+ int num_of_eps;
+ u8 endpoint_num;
+
+ brcmf_dbg(TRACE, "enter\n");
+
+ usbdev_probe_info.usb = usb;
+ usbdev_probe_info.intf = intf;
+
+ if (id != NULL) {
+ usbdev_probe_info.vid = id->idVendor;
+ usbdev_probe_info.pid = id->idProduct;
+ }
+
+ usb_set_intfdata(intf, &usbdev_probe_info);
+
+ /* Check that the device supports only one configuration */
+ if (usb->descriptor.bNumConfigurations != 1) {
+ ret = -1;
+ goto fail;
+ }
+
+ if (usb->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) {
+ ret = -1;
+ goto fail;
+ }
+
+ /*
+ * Only the BDC interface configuration is supported:
+ * Device class: USB_CLASS_VENDOR_SPEC
+ * if0 class: USB_CLASS_VENDOR_SPEC
+ * if0/ep0: control
+ * if0/ep1: bulk in
+ * if0/ep2: bulk out (ok if swapped with bulk in)
+ */
+ if (CONFIGDESC(usb)->bNumInterfaces != 1) {
+ ret = -1;
+ goto fail;
+ }
+
+ /* Check interface */
+ if (IFDESC(usb, CONTROL_IF).bInterfaceClass != USB_CLASS_VENDOR_SPEC ||
+ IFDESC(usb, CONTROL_IF).bInterfaceSubClass != 2 ||
+ IFDESC(usb, CONTROL_IF).bInterfaceProtocol != 0xff) {
+ brcmf_dbg(ERROR, "invalid control interface: class %d, subclass %d, proto %d\n",
+ IFDESC(usb, CONTROL_IF).bInterfaceClass,
+ IFDESC(usb, CONTROL_IF).bInterfaceSubClass,
+ IFDESC(usb, CONTROL_IF).bInterfaceProtocol);
+ ret = -1;
+ goto fail;
+ }
+
+ /* Check control endpoint */
+ endpoint = &IFEPDESC(usb, CONTROL_IF, 0);
+ if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ != USB_ENDPOINT_XFER_INT) {
+ brcmf_dbg(ERROR, "invalid control endpoint %d\n",
+ endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
+ ret = -1;
+ goto fail;
+ }
+
+ endpoint_num = endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+ usbdev_probe_info.intr_pipe = usb_rcvintpipe(usb, endpoint_num);
+
+ usbdev_probe_info.rx_pipe = 0;
+ usbdev_probe_info.rx_pipe2 = 0;
+ usbdev_probe_info.tx_pipe = 0;
+ num_of_eps = IFDESC(usb, BULK_IF).bNumEndpoints - 1;
+
+ /* Check data endpoints and get pipes */
+ for (ep = 1; ep <= num_of_eps; ep++) {
+ endpoint = &IFEPDESC(usb, BULK_IF, ep);
+ if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
+ USB_ENDPOINT_XFER_BULK) {
+ brcmf_dbg(ERROR, "invalid data endpoint %d\n", ep);
+ ret = -1;
+ goto fail;
+ }
+
+ endpoint_num = endpoint->bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK;
+ if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ == USB_DIR_IN) {
+ if (!usbdev_probe_info.rx_pipe) {
+ usbdev_probe_info.rx_pipe =
+ usb_rcvbulkpipe(usb, endpoint_num);
+ } else {
+ usbdev_probe_info.rx_pipe2 =
+ usb_rcvbulkpipe(usb, endpoint_num);
+ }
+ } else {
+ usbdev_probe_info.tx_pipe =
+ usb_sndbulkpipe(usb, endpoint_num);
+ }
+ }
+
+ /* Allocate interrupt URB and data buffer */
+ /* RNDIS says 8-byte intr, our old drivers used 4-byte */
+ if (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == cpu_to_le16(16))
+ usbdev_probe_info.intr_size = 8;
+ else
+ usbdev_probe_info.intr_size = 4;
+
+ usbdev_probe_info.interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
+
+ usbdev_probe_info.device_speed = usb->speed;
+ if (usb->speed == USB_SPEED_HIGH)
+ brcmf_dbg(INFO, "Broadcom high speed USB wireless device detected\n");
+ else
+ brcmf_dbg(INFO, "Broadcom full speed USB wireless device detected\n");
+
+ ret = brcmf_usb_probe_cb(&usb->dev, "", USB_BUS, 0);
+ if (ret)
+ goto fail;
+
+ /* Success */
+ return 0;
+
+fail:
+ brcmf_dbg(ERROR, "failed with errno %d\n", ret);
+ usb_set_intfdata(intf, NULL);
+ return ret;
+
+}
+
+static void
+brcmf_usb_disconnect(struct usb_interface *intf)
+{
+ struct usb_device *usb = interface_to_usbdev(intf);
+
+ brcmf_dbg(TRACE, "enter\n");
+ brcmf_usb_disconnect_cb(brcmf_usb_get_buspub(&usb->dev));
+ usb_set_intfdata(intf, NULL);
+}
+
+/*
+ * only need to signal the bus being down and update the suspend state.
+ */
+static int brcmf_usb_suspend(struct usb_interface *intf, pm_message_t state)
+{
+ struct usb_device *usb = interface_to_usbdev(intf);
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
+
+ brcmf_dbg(TRACE, "enter\n");
+ devinfo->bus_pub.state = BCMFMAC_USB_STATE_DOWN;
+ devinfo->suspend_state = USBOS_SUSPEND_STATE_SUSPENDED;
+ return 0;
+}
+
+/*
+ * mark suspend state active and crank up the bus.
+ */
+static int brcmf_usb_resume(struct usb_interface *intf)
+{
+ struct usb_device *usb = interface_to_usbdev(intf);
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
+
+ brcmf_dbg(TRACE, "enter\n");
+ devinfo->suspend_state = USBOS_SUSPEND_STATE_DEVICE_ACTIVE;
+ brcmf_bus_start(&usb->dev);
+ return 0;
+}
+
+#define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c
+#define BRCMF_USB_DEVICE_ID_43236 0xbd17
+#define BRCMF_USB_DEVICE_ID_BCMFW 0x0bdc
+
+static struct usb_device_id brcmf_usb_devid_table[] = {
+ { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43236) },
+ /* special entry for device with firmware loaded and running */
+ { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table);
+MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME);
+
+/* TODO: suspend and resume entries */
+static struct usb_driver brcmf_usbdrvr = {
+ .name = KBUILD_MODNAME,
+ .probe = brcmf_usb_probe,
+ .disconnect = brcmf_usb_disconnect,
+ .id_table = brcmf_usb_devid_table,
+ .suspend = brcmf_usb_suspend,
+ .resume = brcmf_usb_resume,
+ .supports_autosuspend = 1
+};
+
+void brcmf_usb_exit(void)
+{
+ usb_deregister(&brcmf_usbdrvr);
+ kfree(g_image.data);
+ g_image.data = NULL;
+ g_image.len = 0;
+}
+
+void brcmf_usb_init(void)
+{
+ usb_register(&brcmf_usbdrvr);
+}
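brcmf_usb_init() above ignores the return value of usb_register(). A hedged sketch of a checked variant, not part of the patch (the name brcmf_usb_init_checked and the int return type are assumptions):

int brcmf_usb_init_checked(void)
{
	int ret;

	ret = usb_register(&brcmf_usbdrvr);
	if (ret)
		pr_err("usb_register failed %d\n", ret);

	return ret;	/* the caller can propagate the error */
}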
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.h b/drivers/net/wireless/brcm80211/brcmfmac/usb.h
new file mode 100644
index 000000000000..acfa5e89872f
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_USB_H
+#define BRCMFMAC_USB_H
+
+enum brcmf_usb_state {
+ BCMFMAC_USB_STATE_DL_PENDING,
+ BCMFMAC_USB_STATE_DL_DONE,
+ BCMFMAC_USB_STATE_UP,
+ BCMFMAC_USB_STATE_DOWN,
+ BCMFMAC_USB_STATE_PNP_FWDL,
+ BCMFMAC_USB_STATE_DISCONNECT,
+ BCMFMAC_USB_STATE_SLEEP
+};
+
+enum brcmf_usb_pnp_state {
+ BCMFMAC_USB_PNP_DISCONNECT,
+ BCMFMAC_USB_PNP_SLEEP,
+ BCMFMAC_USB_PNP_RESUME,
+};
+
+struct brcmf_stats {
+ u32 tx_ctlpkts;
+ u32 tx_ctlerrs;
+ u32 rx_ctlpkts;
+ u32 rx_ctlerrs;
+};
+
+struct brcmf_usbdev {
+ struct brcmf_bus *bus;
+ struct brcmf_usbdev_info *devinfo;
+ enum brcmf_usb_state state;
+ struct brcmf_stats stats;
+ int ntxq, nrxq, rxsize;
+ u32 bus_mtu;
+ int devid;
+ int chiprev; /* chip revision number */
+};
+
+/* IO Request Block (IRB) */
+struct brcmf_usbreq {
+ struct list_head list;
+ struct brcmf_usbdev_info *devinfo;
+ struct urb *urb;
+ struct sk_buff *skb;
+};
+
+#endif /* BRCMFMAC_USB_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb_rdl.h b/drivers/net/wireless/brcm80211/brcmfmac/usb_rdl.h
new file mode 100644
index 000000000000..0a35c51c3da2
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb_rdl.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _USB_RDL_H
+#define _USB_RDL_H
+
+/* Control messages: bRequest values */
+#define DL_GETSTATE 0 /* returns the rdl_state_t struct */
+#define DL_CHECK_CRC 1 /* currently unused */
+#define DL_GO 2 /* execute downloaded image */
+#define DL_START 3 /* initialize dl state */
+#define DL_REBOOT 4 /* reboot the device in 2 seconds */
+#define DL_GETVER 5 /* returns the bootrom_id_t struct */
+#define DL_GO_PROTECTED 6 /* execute the downloaded code and set reset
+ * event to occur in 2 seconds. It is the
+ * responsibility of the downloaded code to
+ * clear this event
+ */
+#define DL_EXEC 7 /* jump to a supplied address */
+#define DL_RESETCFG 8 /* To support single enum on dongle
+ * - Not used by bootloader
+ */
+#define DL_DEFER_RESP_OK 9 /* Potentially defer the response to setup
+ * if resp unavailable
+ */
+
+/* states */
+#define DL_WAITING 0 /* waiting to rx first pkt */
+#define DL_READY 1 /* hdr was good, waiting for more of the
+ * compressed image */
+#define DL_BAD_HDR 2 /* hdr was corrupted */
+#define DL_BAD_CRC 3 /* compressed image was corrupted */
+#define DL_RUNNABLE 4 /* download was successful, waiting for go cmd */
+#define DL_START_FAIL 5 /* failed to initialize correctly */
+#define DL_NVRAM_TOOBIG 6 /* host specified nvram data exceeds DL_NVRAM
+ * value */
+#define DL_IMAGE_TOOBIG 7 /* download image too big (exceeds DATA_START
+ * for rdl) */
+
+struct rdl_state_le {
+ __le32 state;
+ __le32 bytes;
+};
+
+struct bootrom_id_le {
+ __le32 chip; /* Chip id */
+ __le32 chiprev; /* Chip rev */
+ __le32 ramsize; /* Size of RAM */
+ __le32 remapbase; /* Current remap base address */
+ __le32 boardtype; /* Type of board */
+ __le32 boardrev; /* Board revision */
+};
+
+#define RDL_CHUNK 1500 /* size of each dl transfer */
+
+#define TRX_OFFSETS_DLFWLEN_IDX 0
+#define TRX_OFFSETS_JUMPTO_IDX 1
+#define TRX_OFFSETS_NVM_LEN_IDX 2
+
+#define TRX_OFFSETS_DLBASE_IDX 0
+
+#endif /* _USB_RDL_H */
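The TRX constants above, together with check_file() in usb.c, describe the firmware image layout: a little-endian header whose offsets[TRX_OFFSETS_DLFWLEN_IDX] entry holds the uncompressed image length. A standalone illustration of the same check, assuming userspace C with glibc's <endian.h> (trx_dl_len is a hypothetical name, not part of the patch):

#include <stdint.h>
#include <string.h>
#include <endian.h>

#define TRX_MAGIC		0x30524448u	/* "HDR0" */
#define TRX_UNCOMP_IMAGE	0x20u
#define TRX_OFFSETS_DLFWLEN_IDX	0

struct trx_header {
	uint32_t magic;
	uint32_t len;
	uint32_t crc32;
	uint32_t flag_version;
	uint32_t offsets[3];
};

/* Returns header-plus-image length in bytes, or -1 for an invalid image. */
static long trx_dl_len(const uint8_t *buf, size_t buflen)
{
	struct trx_header hdr;

	if (buflen < sizeof(hdr))
		return -1;
	memcpy(&hdr, buf, sizeof(hdr));		/* avoid unaligned reads */

	if (le32toh(hdr.magic) != TRX_MAGIC)
		return -1;
	if (!(le32toh(hdr.flag_version) & TRX_UNCOMP_IMAGE))
		return -1;

	return (long)(le32toh(hdr.offsets[TRX_OFFSETS_DLFWLEN_IDX]) + sizeof(hdr));
}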
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index bf11850a20f1..d13ae9c299f2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -16,6 +16,8 @@
/* Toplevel file. Relies on dhd_linux.c to send commands to the dongle. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/sched.h>
@@ -1374,7 +1376,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
memset(&join_params, 0, sizeof(join_params));
join_params_size = sizeof(join_params.ssid_le);
- ssid.SSID_len = min_t(u32, sizeof(ssid.SSID), sme->ssid_len);
+ ssid.SSID_len = min_t(u32, sizeof(ssid.SSID), (u32)sme->ssid_len);
memcpy(&join_params.ssid_le.SSID, sme->ssid, ssid.SSID_len);
memcpy(&ssid.SSID, sme->ssid, ssid.SSID_len);
join_params.ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
@@ -2001,7 +2003,6 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
s32 err = 0;
u16 channel;
u32 freq;
- u64 notify_timestamp;
u16 notify_capability;
u16 notify_interval;
u8 *notify_ie;
@@ -2024,7 +2025,6 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
freq = ieee80211_channel_to_frequency(channel, band->band);
notify_channel = ieee80211_get_channel(wiphy, freq);
- notify_timestamp = jiffies_to_msecs(jiffies)*1000; /* uSec */
notify_capability = le16_to_cpu(bi->capability);
notify_interval = le16_to_cpu(bi->beacon_period);
notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset);
@@ -2038,10 +2038,9 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
WL_CONN("Capability: %X\n", notify_capability);
WL_CONN("Beacon interval: %d\n", notify_interval);
WL_CONN("Signal: %d\n", notify_signal);
- WL_CONN("notify_timestamp: %#018llx\n", notify_timestamp);
bss = cfg80211_inform_bss(wiphy, notify_channel, (const u8 *)bi->BSSID,
- notify_timestamp, notify_capability, notify_interval, notify_ie,
+ 0, notify_capability, notify_interval, notify_ie,
notify_ielen, notify_signal, GFP_KERNEL);
if (!bss)
@@ -2096,7 +2095,6 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
s32 err = 0;
u16 channel;
u32 freq;
- u64 notify_timestamp;
u16 notify_capability;
u16 notify_interval;
u8 *notify_ie;
@@ -2132,7 +2130,6 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
freq = ieee80211_channel_to_frequency(channel, band->band);
notify_channel = ieee80211_get_channel(wiphy, freq);
- notify_timestamp = jiffies_to_msecs(jiffies)*1000; /* uSec */
notify_capability = le16_to_cpu(bi->capability);
notify_interval = le16_to_cpu(bi->beacon_period);
notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset);
@@ -2143,10 +2140,9 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
WL_CONN("capability: %X\n", notify_capability);
WL_CONN("beacon interval: %d\n", notify_interval);
WL_CONN("signal: %d\n", notify_signal);
- WL_CONN("notify_timestamp: %#018llx\n", notify_timestamp);
bss = cfg80211_inform_bss(wiphy, notify_channel, bssid,
- notify_timestamp, notify_capability, notify_interval,
+ 0, notify_capability, notify_interval,
notify_ie, notify_ielen, notify_signal, GFP_KERNEL);
if (!bss) {
@@ -2783,7 +2779,7 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
wiphy_new(&wl_cfg80211_ops,
sizeof(struct brcmf_cfg80211_priv) + sizeof_iface);
if (!wdev->wiphy) {
- WL_ERR("Couldn not allocate wiphy device\n");
+ WL_ERR("Could not allocate wiphy device\n");
err = -ENOMEM;
goto wiphy_new_out;
}
@@ -2809,7 +2805,7 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
*/
err = wiphy_register(wdev->wiphy);
if (err < 0) {
- WL_ERR("Couldn not register wiphy device (%d)\n", err);
+ WL_ERR("Could not register wiphy device (%d)\n", err);
goto wiphy_register_out;
}
return wdev;
@@ -3295,7 +3291,9 @@ static struct brcmf_cfg80211_event_q *brcmf_deq_event(
}
/*
-** push event to tail of the queue
+* push event to tail of the queue
+*
+* remark: this function may not sleep as it is called in atomic context.
*/
static s32
@@ -3304,17 +3302,18 @@ brcmf_enq_event(struct brcmf_cfg80211_priv *cfg_priv, u32 event,
{
struct brcmf_cfg80211_event_q *e;
s32 err = 0;
+ ulong flags;
- e = kzalloc(sizeof(struct brcmf_cfg80211_event_q), GFP_KERNEL);
+ e = kzalloc(sizeof(struct brcmf_cfg80211_event_q), GFP_ATOMIC);
if (!e)
return -ENOMEM;
e->etype = event;
memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg));
- spin_lock_irq(&cfg_priv->evt_q_lock);
+ spin_lock_irqsave(&cfg_priv->evt_q_lock, flags);
list_add_tail(&e->evt_q_list, &cfg_priv->evt_q_list);
- spin_unlock_irq(&cfg_priv->evt_q_lock);
+ spin_unlock_irqrestore(&cfg_priv->evt_q_lock, flags);
return err;
}
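The hunk above moves brcmf_enq_event() to GFP_ATOMIC and spin_lock_irqsave() because, as the new comment notes, it can run in atomic context: the allocation must not sleep and the lock must preserve the caller's IRQ state instead of unconditionally re-enabling interrupts. The pattern in isolation, as a hedged sketch (demo_evt and demo_enq_event are hypothetical names, not part of the patch):

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_evt {
	struct list_head node;
	u32 etype;
};

static int demo_enq_event(struct list_head *q, spinlock_t *lock, u32 etype)
{
	struct demo_evt *e;
	unsigned long flags;

	e = kzalloc(sizeof(*e), GFP_ATOMIC);	/* must not sleep here */
	if (!e)
		return -ENOMEM;
	e->etype = etype;

	spin_lock_irqsave(lock, flags);		/* preserves the caller's IRQ state */
	list_add_tail(&e->node, q);
	spin_unlock_irqrestore(lock, flags);

	return 0;
}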
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index a613b49cb13f..b5d9b36df3d0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -32,63 +32,63 @@ struct brcmf_cfg80211_ibss;
#define WL_DBG_MASK ((WL_DBG_INFO | WL_DBG_ERR | WL_DBG_TRACE) | \
(WL_DBG_SCAN) | (WL_DBG_CONN))
-#define WL_ERR(fmt, args...) \
+#define WL_ERR(fmt, ...) \
do { \
if (brcmf_dbg_level & WL_DBG_ERR) { \
if (net_ratelimit()) { \
- printk(KERN_ERR "ERROR @%s : " fmt, \
- __func__, ##args); \
+ pr_err("ERROR @%s : " fmt, \
+ __func__, ##__VA_ARGS__); \
} \
} \
} while (0)
-#if (defined BCMDBG)
-#define WL_INFO(fmt, args...) \
+#if (defined DEBUG)
+#define WL_INFO(fmt, ...) \
do { \
if (brcmf_dbg_level & WL_DBG_INFO) { \
if (net_ratelimit()) { \
- printk(KERN_ERR "INFO @%s : " fmt, \
- __func__, ##args); \
+ pr_err("INFO @%s : " fmt, \
+ __func__, ##__VA_ARGS__); \
} \
} \
} while (0)
-#define WL_TRACE(fmt, args...) \
+#define WL_TRACE(fmt, ...) \
do { \
if (brcmf_dbg_level & WL_DBG_TRACE) { \
if (net_ratelimit()) { \
- printk(KERN_ERR "TRACE @%s : " fmt, \
- __func__, ##args); \
+ pr_err("TRACE @%s : " fmt, \
+ __func__, ##__VA_ARGS__); \
} \
} \
} while (0)
-#define WL_SCAN(fmt, args...) \
+#define WL_SCAN(fmt, ...) \
do { \
if (brcmf_dbg_level & WL_DBG_SCAN) { \
if (net_ratelimit()) { \
- printk(KERN_ERR "SCAN @%s : " fmt, \
- __func__, ##args); \
+ pr_err("SCAN @%s : " fmt, \
+ __func__, ##__VA_ARGS__); \
} \
} \
} while (0)
-#define WL_CONN(fmt, args...) \
+#define WL_CONN(fmt, ...) \
do { \
if (brcmf_dbg_level & WL_DBG_CONN) { \
if (net_ratelimit()) { \
- printk(KERN_ERR "CONN @%s : " fmt, \
- __func__, ##args); \
+ pr_err("CONN @%s : " fmt, \
+ __func__, ##__VA_ARGS__); \
} \
} \
} while (0)
-#else /* (defined BCMDBG) */
+#else /* (defined DEBUG) */
#define WL_INFO(fmt, args...)
#define WL_TRACE(fmt, args...)
#define WL_SCAN(fmt, args...)
#define WL_CONN(fmt, args...)
-#endif /* (defined BCMDBG) */
+#endif /* (defined DEBUG) */
#define WL_NUM_SCAN_MAX 1
#define WL_NUM_PMKIDS_MAX MAXPMKID /* will be used
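The macro hunk above replaces the GNU named-variadic form (fmt, args...) with C99-style (fmt, ...) and __VA_ARGS__; the ## before __VA_ARGS__ is the GCC extension that drops the trailing comma when no arguments follow fmt. A minimal sketch of the resulting shape (DEMO_DBG is a hypothetical name, not part of the patch):

#define DEMO_DBG(fmt, ...)						\
	do {								\
		if (net_ratelimit())					\
			pr_err("DEMO @%s : " fmt,			\
			       __func__, ##__VA_ARGS__);		\
	} while (0)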
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index ab9bb11abfbb..c93ea35bceec 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -326,11 +326,11 @@
#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID))
-#ifdef BCMDBG
+#ifdef DEBUG
#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
#else
#define SI_MSG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif /* BCMDBG */
+#endif /* DEBUG */
#define GOODCOREADDR(x, b) \
(((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index 30b58870b1b6..95b5902bc4b3 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -915,7 +915,7 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
struct wiphy *wiphy = wlc->wiphy;
-#ifdef BCMDBG
+#ifdef DEBUG
u8 hole[AMPDU_MAX_MPDU];
memset(hole, 0, sizeof(hole));
#endif
@@ -959,14 +959,13 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
if (supr_status) {
update_rate = false;
if (supr_status == TX_STATUS_SUPR_BADCH) {
- wiphy_err(wiphy, "%s: Pkt tx suppressed, "
- "illegal channel possibly %d\n",
+ wiphy_err(wiphy,
+ "%s: Pkt tx suppressed, illegal channel possibly %d\n",
__func__, CHSPEC_CHANNEL(
wlc->default_bss->chanspec));
} else {
if (supr_status != TX_STATUS_SUPR_FRAG)
- wiphy_err(wiphy, "%s:"
- "supr_status 0x%x\n",
+ wiphy_err(wiphy, "%s: supr_status 0x%x\n",
__func__, supr_status);
}
/* no need to retry for badch; will fail again */
@@ -988,9 +987,8 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
}
} else if (txs->phyerr) {
update_rate = false;
- wiphy_err(wiphy, "wl%d: ampdu tx phy "
- "error (0x%x)\n", wlc->pub->unit,
- txs->phyerr);
+ wiphy_err(wiphy, "%s: ampdu tx phy error (0x%x)\n",
+ __func__, txs->phyerr);
if (brcm_msg_level & LOG_ERROR_VAL) {
brcmu_prpkt("txpkt (AMPDU)", p);
@@ -1018,10 +1016,10 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
ack_recd = false;
if (ba_recd) {
bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX);
- BCMMSG(wlc->wiphy, "tid %d seq %d,"
- " start_seq %d, bindex %d set %d, index %d\n",
- tid, seq, start_seq, bindex,
- isset(bitmap, bindex), index);
+ BCMMSG(wiphy,
+ "tid %d seq %d, start_seq %d, bindex %d set %d, index %d\n",
+ tid, seq, start_seq, bindex,
+ isset(bitmap, bindex), index);
/* if acked then clear bit and free packet */
if ((bindex < AMPDU_TX_BA_MAX_WSIZE)
&& isset(bitmap, bindex)) {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 2e90a9a16ed6..11054ae9d4f6 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -177,7 +177,7 @@
#define BCMEXTRAHDROOM 172
/* debug/trace */
-#ifdef BCMDBG
+#ifdef DEBUG
#define DMA_ERROR(fmt, ...) \
do { \
if (*di->msg_level & 1) \
@@ -193,7 +193,7 @@ do { \
no_printk(fmt, ##__VA_ARGS__)
#define DMA_TRACE(fmt, ...) \
no_printk(fmt, ##__VA_ARGS__)
-#endif /* BCMDBG */
+#endif /* DEBUG */
#define DMA_NONE(fmt, ...) \
no_printk(fmt, ##__VA_ARGS__)
@@ -968,7 +968,7 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
pktcnt++;
}
-#ifdef BCMDBG
+#ifdef DEBUG
if (resid > 0) {
uint cur;
cur =
@@ -979,7 +979,7 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
di->rxin, di->rxout, cur);
}
-#endif /* BCMDBG */
+#endif /* DEBUG */
if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
DMA_ERROR("%s: bad frame length (%d)\n",
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 448ab9c4eb47..569ab8abd2a1 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -15,6 +15,7 @@
*/
#define __UNDEF_NO_VERSION__
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/etherdevice.h>
#include <linux/sched.h>
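The pr_fmt() definition added above makes every subsequent pr_*() call in this file prefix its output with KBUILD_MODNAME, so individual call sites no longer splice the module name in by hand. A minimal sketch (my_report_error() is hypothetical, and the sample output is illustrative):

/* With: #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 * this logs, illustratively, "brcmsmac: register returned -16".
 */
static void my_report_error(int err)
{
	pr_err("register returned %d\n", err);
}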
@@ -96,10 +97,10 @@ static struct bcma_device_id brcms_coreid_table[] = {
};
MODULE_DEVICE_TABLE(bcma, brcms_coreid_table);
-#ifdef BCMDBG
+#ifdef DEBUG
static int msglevel = 0xdeadbeef;
module_param(msglevel, int, 0);
-#endif /* BCMDBG */
+#endif /* DEBUG */
static struct ieee80211_channel brcms_2ghz_chantable[] = {
CHAN2GHZ(1, 2412, IEEE80211_CHAN_NO_HT40MINUS),
@@ -857,7 +858,7 @@ static void brcms_free(struct brcms_info *wl)
/* free timers */
for (t = wl->timers; t; t = next) {
next = t->next;
-#ifdef BCMDBG
+#ifdef DEBUG
kfree(t->name);
#endif
kfree(t);
@@ -1121,8 +1122,7 @@ static int __devinit brcms_bcma_probe(struct bcma_device *pdev)
wl = brcms_attach(pdev);
if (!wl) {
- pr_err("%s: %s: brcms_attach failed!\n", KBUILD_MODNAME,
- __func__);
+ pr_err("%s: brcms_attach failed!\n", __func__);
return -ENODEV;
}
return 0;
@@ -1136,8 +1136,8 @@ static int brcms_suspend(struct bcma_device *pdev)
hw = bcma_get_drvdata(pdev);
wl = hw->priv;
if (!wl) {
- wiphy_err(wl->wiphy,
- "brcms_suspend: bcma_get_drvdata failed\n");
+ pr_err("%s: %s: no driver private struct!\n", KBUILD_MODNAME,
+ __func__);
return -ENODEV;
}
@@ -1169,25 +1169,31 @@ static struct bcma_driver brcms_bcma_driver = {
/**
* This is the main entry point for the brcmsmac driver.
*
- * This function determines if a device pointed to by pdev is a WL device,
- * and if so, performs a brcms_attach() on it.
- *
+ * This function is scheduled upon module initialization and
+ * does the driver registration, which results in a brcms_bcma_probe()
+ * call and, in turn, the driver bringup.
*/
-static int __init brcms_module_init(void)
+static void brcms_driver_init(struct work_struct *work)
{
- int error = -ENODEV;
+ int error;
+
+ error = bcma_driver_register(&brcms_bcma_driver);
+ if (error)
+ pr_err("%s: register returned %d\n", __func__, error);
+}
-#ifdef BCMDBG
+static DECLARE_WORK(brcms_driver_work, brcms_driver_init);
+
+static int __init brcms_module_init(void)
+{
+#ifdef DEBUG
if (msglevel != 0xdeadbeef)
brcm_msg_level = msglevel;
-#endif /* BCMDBG */
-
- error = bcma_driver_register(&brcms_bcma_driver);
- printk(KERN_ERR "%s: register returned %d\n", __func__, error);
- if (!error)
- return 0;
+#endif
+ if (!schedule_work(&brcms_driver_work))
+ return -EBUSY;
- return error;
+ return 0;
}
/**
@@ -1199,6 +1205,7 @@ static int __init brcms_module_init(void)
*/
static void __exit brcms_module_exit(void)
{
+ cancel_work_sync(&brcms_driver_work);
bcma_driver_unregister(&brcms_bcma_driver);
}
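The init-path change above defers the potentially slow bcma_driver_register() call to a work item so module_init() returns quickly, and pairs it with cancel_work_sync() on exit so the registration work cannot still be in flight during teardown. The same shape reduced to a self-contained sketch (my_* names are hypothetical and the work body is a placeholder):

#include <linux/module.h>
#include <linux/workqueue.h>

static void my_deferred_setup(struct work_struct *work)
{
	/* Placeholder for the real registration work. */
	pr_info("deferred setup running\n");
}

static DECLARE_WORK(my_setup_work, my_deferred_setup);

static int __init my_init(void)
{
	/* schedule_work() returns false only if the item was already queued. */
	if (!schedule_work(&my_setup_work))
		return -EBUSY;
	return 0;
}

static void __exit my_exit(void)
{
	/* Ensure the deferred setup is finished (or cancelled) before unload. */
	cancel_work_sync(&my_setup_work);
}

module_init(my_init);
module_exit(my_exit);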
@@ -1367,7 +1374,7 @@ struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
t->next = wl->timers;
wl->timers = t;
-#ifdef BCMDBG
+#ifdef DEBUG
t->name = kmalloc(strlen(name) + 1, GFP_ATOMIC);
if (t->name)
strcpy(t->name, name);
@@ -1386,7 +1393,7 @@ void brcms_add_timer(struct brcms_timer *t, uint ms, int periodic)
{
struct ieee80211_hw *hw = t->wl->pub->ieee_hw;
-#ifdef BCMDBG
+#ifdef DEBUG
if (t->set)
wiphy_err(hw->wiphy, "%s: Already set. Name: %s, per %d\n",
__func__, t->name, periodic);
@@ -1431,7 +1438,7 @@ void brcms_free_timer(struct brcms_timer *t)
if (wl->timers == t) {
wl->timers = wl->timers->next;
-#ifdef BCMDBG
+#ifdef DEBUG
kfree(t->name);
#endif
kfree(t);
@@ -1443,7 +1450,7 @@ void brcms_free_timer(struct brcms_timer *t)
while (tmp) {
if (tmp->next == t) {
tmp->next = t->next;
-#ifdef BCMDBG
+#ifdef DEBUG
kfree(t->name);
#endif
kfree(t);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 8f60419c37bf..9358bd5ebd35 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -40,7 +40,7 @@ struct brcms_timer {
bool periodic;
bool set; /* indicates if timer is active */
struct brcms_timer *next; /* for freeing on unload */
-#ifdef BCMDBG
+#ifdef DEBUG
char *name; /* Description of the timer */
#endif
};
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index f6affc6fd12a..231ddf4a674f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -14,6 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pci_ids.h>
#include <linux/if_ether.h>
#include <net/mac80211.h>
@@ -293,11 +295,11 @@ const u8 prio2fifo[NUMPRIO] = {
/* debug/trace */
uint brcm_msg_level =
-#if defined(BCMDBG)
+#if defined(DEBUG)
LOG_ERROR_VAL;
#else
0;
-#endif /* BCMDBG */
+#endif /* DEBUG */
/* TX FIFO number to WME/802.1E Access Category */
static const u8 wme_fifo2ac[] = {
@@ -342,14 +344,14 @@ static const u16 xmtfifo_sz[][NFIFO] = {
{9, 58, 22, 14, 14, 5},
};
-#ifdef BCMDBG
+#ifdef DEBUG
static const char * const fifo_names[] = {
"AC_BK", "AC_BE", "AC_VI", "AC_VO", "BCMC", "ATIM" };
#else
static const char fifo_names[6][0];
#endif
-#ifdef BCMDBG
+#ifdef DEBUG
/* pointer to most recently allocated wl/wlc */
static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL);
#endif
@@ -2899,7 +2901,6 @@ brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset, u32 sel)
objoff += 2;
return bcma_read16(core, objoff);
-;
}
static void
@@ -3075,30 +3076,30 @@ static void brcms_c_statsupd(struct brcms_c_info *wlc)
{
int i;
struct macstat macstats;
-#ifdef BCMDBG
+#ifdef DEBUG
u16 delta;
u16 rxf0ovfl;
u16 txfunfl[NFIFO];
-#endif /* BCMDBG */
+#endif /* DEBUG */
/* if driver down, make no sense to update stats */
if (!wlc->pub->up)
return;
-#ifdef BCMDBG
+#ifdef DEBUG
/* save last rx fifo 0 overflow count */
rxf0ovfl = wlc->core->macstat_snapshot->rxf0ovfl;
/* save last tx fifo underflow count */
for (i = 0; i < NFIFO; i++)
txfunfl[i] = wlc->core->macstat_snapshot->txfunfl[i];
-#endif /* BCMDBG */
+#endif /* DEBUG */
/* Read mac stats from contiguous shared memory */
brcms_b_copyfrom_objmem(wlc->hw, M_UCODE_MACSTAT, &macstats,
sizeof(struct macstat), OBJADDR_SHM_SEL);
-#ifdef BCMDBG
+#ifdef DEBUG
/* check for rx fifo 0 overflow */
delta = (u16) (wlc->core->macstat_snapshot->rxf0ovfl - rxf0ovfl);
if (delta)
@@ -3114,7 +3115,7 @@ static void brcms_c_statsupd(struct brcms_c_info *wlc)
wiphy_err(wlc->wiphy, "wl%d: %u tx fifo %d underflows!"
"\n", wlc->pub->unit, delta, i);
}
-#endif /* BCMDBG */
+#endif /* DEBUG */
/* merge counters from dma module */
for (i = 0; i < NFIFO; i++) {
@@ -3246,7 +3247,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
}
/* For old ucode, txfifo sizes needs to be modified(increased) */
- if (fifosz_fixup == true)
+ if (fifosz_fixup)
brcms_b_corerev_fifofixup(wlc_hw);
/* check txfifo allocations match between ucode and driver */
@@ -5425,7 +5426,7 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
return -EINVAL;
/* update configuration value */
- if (config == true)
+ if (config)
brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode);
/* Clear rateset override */
@@ -5765,62 +5766,49 @@ int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
return -ENODATA;
}
-#ifdef BCMDBG
-static const char * const supr_reason[] = {
- "None", "PMQ Entry", "Flush request",
- "Previous frag failure", "Channel mismatch",
- "Lifetime Expiry", "Underflow"
-};
-
-static void brcms_c_print_txs_status(u16 s)
-{
- printk(KERN_DEBUG "[15:12] %d frame attempts\n",
- (s & TX_STATUS_FRM_RTX_MASK) >> TX_STATUS_FRM_RTX_SHIFT);
- printk(KERN_DEBUG " [11:8] %d rts attempts\n",
- (s & TX_STATUS_RTS_RTX_MASK) >> TX_STATUS_RTS_RTX_SHIFT);
- printk(KERN_DEBUG " [7] %d PM mode indicated\n",
- ((s & TX_STATUS_PMINDCTD) ? 1 : 0));
- printk(KERN_DEBUG " [6] %d intermediate status\n",
- ((s & TX_STATUS_INTERMEDIATE) ? 1 : 0));
- printk(KERN_DEBUG " [5] %d AMPDU\n",
- (s & TX_STATUS_AMPDU) ? 1 : 0);
- printk(KERN_DEBUG " [4:2] %d Frame Suppressed Reason (%s)\n",
- ((s & TX_STATUS_SUPR_MASK) >> TX_STATUS_SUPR_SHIFT),
- supr_reason[(s & TX_STATUS_SUPR_MASK) >> TX_STATUS_SUPR_SHIFT]);
- printk(KERN_DEBUG " [1] %d acked\n",
- ((s & TX_STATUS_ACK_RCV) ? 1 : 0));
-}
-#endif /* BCMDBG */
-
void brcms_c_print_txstatus(struct tx_status *txs)
{
-#if defined(BCMDBG)
- u16 s = txs->status;
- u16 ackphyrxsh = txs->ackphyrxsh;
-
- printk(KERN_DEBUG "\ntxpkt (MPDU) Complete\n");
-
- printk(KERN_DEBUG "FrameID: %04x ", txs->frameid);
- printk(KERN_DEBUG "TxStatus: %04x", s);
- printk(KERN_DEBUG "\n");
-
- brcms_c_print_txs_status(s);
-
- printk(KERN_DEBUG "LastTxTime: %04x ", txs->lasttxtime);
- printk(KERN_DEBUG "Seq: %04x ", txs->sequence);
- printk(KERN_DEBUG "PHYTxStatus: %04x ", txs->phyerr);
- printk(KERN_DEBUG "RxAckRSSI: %04x ",
- (ackphyrxsh & PRXS1_JSSI_MASK) >> PRXS1_JSSI_SHIFT);
- printk(KERN_DEBUG "RxAckSQ: %04x",
- (ackphyrxsh & PRXS1_SQ_MASK) >> PRXS1_SQ_SHIFT);
- printk(KERN_DEBUG "\n");
-#endif /* defined(BCMDBG) */
+ pr_debug("\ntxpkt (MPDU) Complete\n");
+
+ pr_debug("FrameID: %04x TxStatus: %04x\n", txs->frameid, txs->status);
+
+ pr_debug("[15:12] %d frame attempts\n",
+ (txs->status & TX_STATUS_FRM_RTX_MASK) >>
+ TX_STATUS_FRM_RTX_SHIFT);
+ pr_debug(" [11:8] %d rts attempts\n",
+ (txs->status & TX_STATUS_RTS_RTX_MASK) >>
+ TX_STATUS_RTS_RTX_SHIFT);
+ pr_debug(" [7] %d PM mode indicated\n",
+ txs->status & TX_STATUS_PMINDCTD ? 1 : 0);
+ pr_debug(" [6] %d intermediate status\n",
+ txs->status & TX_STATUS_INTERMEDIATE ? 1 : 0);
+ pr_debug(" [5] %d AMPDU\n",
+ txs->status & TX_STATUS_AMPDU ? 1 : 0);
+ pr_debug(" [4:2] %d Frame Suppressed Reason (%s)\n",
+ (txs->status & TX_STATUS_SUPR_MASK) >> TX_STATUS_SUPR_SHIFT,
+ (const char *[]) {
+ "None",
+ "PMQ Entry",
+ "Flush request",
+ "Previous frag failure",
+ "Channel mismatch",
+ "Lifetime Expiry",
+ "Underflow"
+ } [(txs->status & TX_STATUS_SUPR_MASK) >>
+ TX_STATUS_SUPR_SHIFT]);
+ pr_debug(" [1] %d acked\n",
+ txs->status & TX_STATUS_ACK_RCV ? 1 : 0);
+
+ pr_debug("LastTxTime: %04x Seq: %04x PHYTxStatus: %04x RxAckRSSI: %04x RxAckSQ: %04x\n",
+ txs->lasttxtime, txs->sequence, txs->phyerr,
+ (txs->ackphyrxsh & PRXS1_JSSI_MASK) >> PRXS1_JSSI_SHIFT,
+ (txs->ackphyrxsh & PRXS1_SQ_MASK) >> PRXS1_SQ_SHIFT);
}
bool brcms_c_chipmatch(u16 vendor, u16 device)
{
if (vendor != PCI_VENDOR_ID_BROADCOM) {
- pr_err("chipmatch: unknown vendor id %04x\n", vendor);
+ pr_err("unknown vendor id %04x\n", vendor);
return false;
}
@@ -5833,11 +5821,11 @@ bool brcms_c_chipmatch(u16 vendor, u16 device)
if ((device == BCM43236_D11N_ID) || (device == BCM43236_D11N2G_ID))
return true;
- pr_err("chipmatch: unknown device id %04x\n", device);
+ pr_err("unknown device id %04x\n", device);
return false;
}
-#if defined(BCMDBG)
+#if defined(DEBUG)
void brcms_c_print_txdesc(struct d11txh *txh)
{
u16 mtcl = le16_to_cpu(txh->MacTxControlLow);
@@ -5871,57 +5859,56 @@ void brcms_c_print_txdesc(struct d11txh *txh)
struct ieee80211_rts rts = txh->rts_frame;
/* add plcp header along with txh descriptor */
- printk(KERN_DEBUG "Raw TxDesc + plcp header:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- txh, sizeof(struct d11txh) + 48);
-
- printk(KERN_DEBUG "TxCtlLow: %04x ", mtcl);
- printk(KERN_DEBUG "TxCtlHigh: %04x ", mtch);
- printk(KERN_DEBUG "FC: %04x ", mfc);
- printk(KERN_DEBUG "FES Time: %04x\n", tfest);
- printk(KERN_DEBUG "PhyCtl: %04x%s ", ptcw,
+ brcmu_dbg_hex_dump(txh, sizeof(struct d11txh) + 48,
+ "Raw TxDesc + plcp header:\n");
+
+ pr_debug("TxCtlLow: %04x ", mtcl);
+ pr_debug("TxCtlHigh: %04x ", mtch);
+ pr_debug("FC: %04x ", mfc);
+ pr_debug("FES Time: %04x\n", tfest);
+ pr_debug("PhyCtl: %04x%s ", ptcw,
(ptcw & PHY_TXC_SHORT_HDR) ? " short" : "");
- printk(KERN_DEBUG "PhyCtl_1: %04x ", ptcw_1);
- printk(KERN_DEBUG "PhyCtl_1_Fbr: %04x\n", ptcw_1_Fbr);
- printk(KERN_DEBUG "PhyCtl_1_Rts: %04x ", ptcw_1_Rts);
- printk(KERN_DEBUG "PhyCtl_1_Fbr_Rts: %04x\n", ptcw_1_FbrRts);
- printk(KERN_DEBUG "MainRates: %04x ", mainrates);
- printk(KERN_DEBUG "XtraFrameTypes: %04x ", xtraft);
- printk(KERN_DEBUG "\n");
+ pr_debug("PhyCtl_1: %04x ", ptcw_1);
+ pr_debug("PhyCtl_1_Fbr: %04x\n", ptcw_1_Fbr);
+ pr_debug("PhyCtl_1_Rts: %04x ", ptcw_1_Rts);
+ pr_debug("PhyCtl_1_Fbr_Rts: %04x\n", ptcw_1_FbrRts);
+ pr_debug("MainRates: %04x ", mainrates);
+ pr_debug("XtraFrameTypes: %04x ", xtraft);
+ pr_debug("\n");
print_hex_dump_bytes("SecIV:", DUMP_PREFIX_OFFSET, iv, sizeof(txh->IV));
print_hex_dump_bytes("RA:", DUMP_PREFIX_OFFSET,
ra, sizeof(txh->TxFrameRA));
- printk(KERN_DEBUG "Fb FES Time: %04x ", tfestfb);
+ pr_debug("Fb FES Time: %04x ", tfestfb);
print_hex_dump_bytes("Fb RTS PLCP:", DUMP_PREFIX_OFFSET,
rtspfb, sizeof(txh->RTSPLCPFallback));
- printk(KERN_DEBUG "RTS DUR: %04x ", rtsdfb);
+ pr_debug("RTS DUR: %04x ", rtsdfb);
print_hex_dump_bytes("PLCP:", DUMP_PREFIX_OFFSET,
fragpfb, sizeof(txh->FragPLCPFallback));
- printk(KERN_DEBUG "DUR: %04x", fragdfb);
- printk(KERN_DEBUG "\n");
+ pr_debug("DUR: %04x", fragdfb);
+ pr_debug("\n");
- printk(KERN_DEBUG "MModeLen: %04x ", mmodelen);
- printk(KERN_DEBUG "MModeFbrLen: %04x\n", mmodefbrlen);
+ pr_debug("MModeLen: %04x ", mmodelen);
+ pr_debug("MModeFbrLen: %04x\n", mmodefbrlen);
- printk(KERN_DEBUG "FrameID: %04x\n", tfid);
- printk(KERN_DEBUG "TxStatus: %04x\n", txs);
+ pr_debug("FrameID: %04x\n", tfid);
+ pr_debug("TxStatus: %04x\n", txs);
- printk(KERN_DEBUG "MaxNumMpdu: %04x\n", mnmpdu);
- printk(KERN_DEBUG "MaxAggbyte: %04x\n", mabyte);
- printk(KERN_DEBUG "MaxAggbyte_fb: %04x\n", mabyte_f);
- printk(KERN_DEBUG "MinByte: %04x\n", mmbyte);
+ pr_debug("MaxNumMpdu: %04x\n", mnmpdu);
+ pr_debug("MaxAggbyte: %04x\n", mabyte);
+ pr_debug("MaxAggbyte_fb: %04x\n", mabyte_f);
+ pr_debug("MinByte: %04x\n", mmbyte);
print_hex_dump_bytes("RTS PLCP:", DUMP_PREFIX_OFFSET,
rtsph, sizeof(txh->RTSPhyHeader));
print_hex_dump_bytes("RTS Frame:", DUMP_PREFIX_OFFSET,
(u8 *)&rts, sizeof(txh->rts_frame));
- printk(KERN_DEBUG "\n");
+ pr_debug("\n");
}
-#endif /* defined(BCMDBG) */
+#endif /* defined(DEBUG) */
-#if defined(BCMDBG)
+#if defined(DEBUG)
static int
brcms_c_format_flags(const struct brcms_c_bit_desc *bd, u32 flags, char *buf,
int len)
@@ -5975,9 +5962,9 @@ brcms_c_format_flags(const struct brcms_c_bit_desc *bd, u32 flags, char *buf,
return (int)(p - buf);
}
-#endif /* defined(BCMDBG) */
+#endif /* defined(DEBUG) */
-#if defined(BCMDBG)
+#if defined(DEBUG)
void brcms_c_print_rxh(struct d11rxhdr *rxh)
{
u16 len = rxh->RxFrameSize;
@@ -5999,24 +5986,22 @@ void brcms_c_print_rxh(struct d11rxhdr *rxh)
{0, NULL}
};
- printk(KERN_DEBUG "Raw RxDesc:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, rxh,
- sizeof(struct d11rxhdr));
+ brcmu_dbg_hex_dump(rxh, sizeof(struct d11rxhdr), "Raw RxDesc:\n");
brcms_c_format_flags(macstat_flags, macstatus1, flagstr, 64);
snprintf(lenbuf, sizeof(lenbuf), "0x%x", len);
- printk(KERN_DEBUG "RxFrameSize: %6s (%d)%s\n", lenbuf, len,
+ pr_debug("RxFrameSize: %6s (%d)%s\n", lenbuf, len,
(rxh->PhyRxStatus_0 & PRXS0_SHORTH) ? " short preamble" : "");
- printk(KERN_DEBUG "RxPHYStatus: %04x %04x %04x %04x\n",
+ pr_debug("RxPHYStatus: %04x %04x %04x %04x\n",
phystatus_0, phystatus_1, phystatus_2, phystatus_3);
- printk(KERN_DEBUG "RxMACStatus: %x %s\n", macstatus1, flagstr);
- printk(KERN_DEBUG "RXMACaggtype: %x\n",
+ pr_debug("RxMACStatus: %x %s\n", macstatus1, flagstr);
+ pr_debug("RXMACaggtype: %x\n",
(macstatus2 & RXS_AGGTYPE_MASK));
- printk(KERN_DEBUG "RxTSFTime: %04x\n", rxh->RxTSFTime);
+ pr_debug("RxTSFTime: %04x\n", rxh->RxTSFTime);
}
-#endif /* defined(BCMDBG) */
+#endif /* defined(DEBUG) */
u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate)
{
@@ -8354,7 +8339,7 @@ brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
wlc->wiphy = wl->wiphy;
pub = wlc->pub;
-#if defined(BCMDBG)
+#if defined(DEBUG)
wlc_info_dbg = wlc;
#endif
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
index adb136ec1f04..8debc74c54e1 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -648,10 +648,12 @@ extern void brcms_c_print_txstatus(struct tx_status *txs);
extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
uint *blocks);
-#if defined(BCMDBG)
+#if defined(DEBUG)
extern void brcms_c_print_txdesc(struct d11txh *txh);
#else
-#define brcms_c_print_txdesc(a)
+static inline void brcms_c_print_txdesc(struct d11txh *txh)
+{
+}
#endif
extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
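Replacing the empty macro with an empty static inline keeps the argument type-checked and avoids "set but not used" warnings in non-DEBUG builds, while the compiler still optimises the call away. The general pattern (my_dump_txdesc() is a hypothetical helper):

#if defined(DEBUG)
extern void my_dump_txdesc(struct d11txh *txh);	/* real version elsewhere */
#else
static inline void my_dump_txdesc(struct d11txh *txh)
{
	/* Argument is still type-checked; call sites compile to nothing. */
}
#endif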
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index a16f1ab292fd..39095741fd05 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -14,6 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/cordic.h>
@@ -17822,8 +17824,6 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
if (pi->sh->sromrev < 4) {
idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_2g;
idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_2g;
- target_pwr_qtrdbm[0] = 13 * 4;
- target_pwr_qtrdbm[1] = 13 * 4;
a1[0] = -424;
a1[1] = -424;
b0[0] = 5612;
@@ -17837,10 +17837,6 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
case WL_CHAN_FREQ_RANGE_2G:
idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_2g;
idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_2g;
- target_pwr_qtrdbm[0] =
- pi->nphy_pwrctrl_info[0].max_pwr_2g;
- target_pwr_qtrdbm[1] =
- pi->nphy_pwrctrl_info[1].max_pwr_2g;
a1[0] = pi->nphy_pwrctrl_info[0].pwrdet_2g_a1;
a1[1] = pi->nphy_pwrctrl_info[1].pwrdet_2g_a1;
b0[0] = pi->nphy_pwrctrl_info[0].pwrdet_2g_b0;
@@ -17851,10 +17847,6 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
case WL_CHAN_FREQ_RANGE_5GL:
idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_5g;
idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_5g;
- target_pwr_qtrdbm[0] =
- pi->nphy_pwrctrl_info[0].max_pwr_5gl;
- target_pwr_qtrdbm[1] =
- pi->nphy_pwrctrl_info[1].max_pwr_5gl;
a1[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gl_a1;
a1[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gl_a1;
b0[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gl_b0;
@@ -17865,10 +17857,6 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
case WL_CHAN_FREQ_RANGE_5GM:
idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_5g;
idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_5g;
- target_pwr_qtrdbm[0] =
- pi->nphy_pwrctrl_info[0].max_pwr_5gm;
- target_pwr_qtrdbm[1] =
- pi->nphy_pwrctrl_info[1].max_pwr_5gm;
a1[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gm_a1;
a1[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gm_a1;
b0[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gm_b0;
@@ -17879,10 +17867,6 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
case WL_CHAN_FREQ_RANGE_5GH:
idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_5g;
idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_5g;
- target_pwr_qtrdbm[0] =
- pi->nphy_pwrctrl_info[0].max_pwr_5gh;
- target_pwr_qtrdbm[1] =
- pi->nphy_pwrctrl_info[1].max_pwr_5gh;
a1[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gh_a1;
a1[1] = pi->nphy_pwrctrl_info[1].pwrdet_5gh_a1;
b0[0] = pi->nphy_pwrctrl_info[0].pwrdet_5gh_b0;
@@ -17893,8 +17877,6 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
default:
idle_tssi[0] = pi->nphy_pwrctrl_info[0].idle_tssi_2g;
idle_tssi[1] = pi->nphy_pwrctrl_info[1].idle_tssi_2g;
- target_pwr_qtrdbm[0] = 13 * 4;
- target_pwr_qtrdbm[1] = 13 * 4;
a1[0] = -424;
a1[1] = -424;
b0[0] = 5612;
@@ -17905,6 +17887,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
}
}
+ /* use the provided transmit power */
target_pwr_qtrdbm[0] = (s8) pi->tx_power_max;
target_pwr_qtrdbm[1] = (s8) pi->tx_power_max;
@@ -19987,12 +19970,11 @@ static void wlc_phy_radio_init_2057(struct brcms_phy *pi)
switch (pi->pubpi.radiorev) {
case 5:
- if (pi->pubpi.radiover == 0x0)
+ if (NREV_IS(pi->pubpi.phy_rev, 8))
regs_2057_ptr = regs_2057_rev5;
- else if (pi->pubpi.radiover == 0x1)
+ else if (NREV_IS(pi->pubpi.phy_rev, 9))
regs_2057_ptr = regs_2057_rev5v1;
- else
- break;
+ break;
case 7:
@@ -21462,7 +21444,7 @@ void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init)
if (NREV_GE(pi->pubpi.phy_rev, 3)) {
u16 v0 = 0x211, v1 = 0x222, v2 = 0x144, v3 = 0x188;
- if (lut_init == false)
+ if (!lut_init)
return;
if (pi->srom_fem2g.antswctrllut == 0) {
@@ -26434,8 +26416,7 @@ cal_try:
}
if (bcmerror != 0) {
- printk(KERN_DEBUG "%s: Failed, cnt = %d\n", __func__,
- cal_retry);
+ pr_debug("%s: Failed, cnt = %d\n", __func__, cal_retry);
if (cal_retry < CAL_RETRY_CNT) {
cal_retry++;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.c b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
index 563743643038..b96f4b9d74bd 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
@@ -621,7 +621,7 @@ static inline void cpu_to_le16_buf(u16 *buf, uint nwords)
/*
* convert binary srom data into linked list of srom variable items.
*/
-static void
+static int
_initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
{
struct brcms_srom_list_head *entry;
@@ -638,6 +638,9 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
/* first store the srom revision */
entry = kzalloc(sizeof(struct brcms_srom_list_head), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
entry->varid = BRCMS_SROM_REV;
entry->var_type = BRCMS_SROM_UNUMBER;
entry->uval = sromrev;
@@ -715,6 +718,8 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
entry = kzalloc(sizeof(struct brcms_srom_list_head) +
extra_space, GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
entry->varid = id;
entry->var_type = type;
if (flags & SRFL_ETHADDR) {
@@ -754,6 +759,8 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
entry =
kzalloc(sizeof(struct brcms_srom_list_head),
GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
entry->varid = srv->varid+p;
entry->var_type = BRCMS_SROM_UNUMBER;
entry->uval = val;
@@ -761,6 +768,7 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
}
pb += psz;
}
+ return 0;
}
/*
@@ -906,7 +914,9 @@ int srom_var_init(struct si_pub *sih)
INIT_LIST_HEAD(&sii->var_list);
/* parse SROM into name=value pairs. */
- _initvars_srom_pci(sromrev, srom, &sii->var_list);
+ err = _initvars_srom_pci(sromrev, srom, &sii->var_list);
+ if (err)
+ srom_free_vars(sih);
}
errout:
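The srom.c change turns a void helper into one that returns -ENOMEM on the first failed kzalloc() and lets the caller unwind the partially built list. The same check-and-unwind shape in miniature, with hypothetical my_* names in place of the driver's srom types:

#include <linux/list.h>
#include <linux/slab.h>

struct my_var {
	struct list_head node;
	u32 val;
};

static void my_free_vars(struct list_head *head)
{
	struct my_var *v, *tmp;

	list_for_each_entry_safe(v, tmp, head, node) {
		list_del(&v->node);
		kfree(v);
	}
}

static int my_build_vars(struct list_head *head, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		struct my_var *v = kzalloc(sizeof(*v), GFP_KERNEL);

		if (!v)
			return -ENOMEM;	/* caller unwinds the partial list */
		v->val = i;
		list_add_tail(&v->node, head);
	}
	return 0;
}

static int my_init_vars(struct list_head *head)
{
	int err = my_build_vars(head, 16);

	if (err)
		my_free_vars(head);	/* mirrors srom_free_vars() on failure */
	return err;
}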
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
index b7537f70a795..b45ab34cdfdc 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -14,6 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/netdevice.h>
#include <linux/module.h>
@@ -240,17 +242,35 @@ struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
}
EXPORT_SYMBOL(brcmu_pktq_mdeq);
-#if defined(BCMDBG)
+#if defined(DEBUG)
/* pretty hex print a pkt buffer chain */
void brcmu_prpkt(const char *msg, struct sk_buff *p0)
{
struct sk_buff *p;
if (msg && (msg[0] != '\0'))
- printk(KERN_DEBUG "%s:\n", msg);
+ pr_debug("%s:\n", msg);
for (p = p0; p; p = p->next)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, p->data, p->len);
}
EXPORT_SYMBOL(brcmu_prpkt);
-#endif /* defined(BCMDBG) */
+
+void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ pr_debug("%pV", &vaf);
+
+ va_end(args);
+
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, data, size);
+}
+EXPORT_SYMBOL(brcmu_dbg_hex_dump);
+#endif /* defined(DEBUG) */
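The new brcmu_dbg_hex_dump() above combines a printf-style header with a hex dump by routing the variadic arguments through struct va_format and the kernel's %pV format specifier. A hypothetical caller would look like:

/* Hypothetical call site; buf/len/unit stand in for whatever is being dumped. */
static void my_dump_example(const void *buf, size_t len, int unit)
{
	brcmu_dbg_hex_dump(buf, len, "raw buffer, wl%d:\n", unit);
}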
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index ad249a0b4730..477b92ad3d62 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -176,10 +176,21 @@ struct ipv4_addr;
/* externs */
/* format/print */
-#ifdef BCMDBG
+#ifdef DEBUG
extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
#else
#define brcmu_prpkt(a, b)
-#endif /* BCMDBG */
+#endif /* DEBUG */
+
+#ifdef DEBUG
+extern __printf(3, 4)
+void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...);
+#else
+__printf(3, 4)
+static inline
+void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...)
+{
+}
+#endif
#endif /* _BRCMU_UTILS_H_ */
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index a8bddd81b4d1..aa15cc4269a1 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -347,11 +347,9 @@ static int hfa384x_cmd(struct net_device *dev, u16 cmd, u16 param0,
return -EINTR;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (entry == NULL) {
- printk(KERN_DEBUG "%s: hfa384x_cmd - kmalloc failed\n",
- dev->name);
+ if (entry == NULL)
return -ENOMEM;
- }
+
atomic_set(&entry->usecnt, 1);
entry->type = CMD_SLEEP;
entry->cmd = cmd;
@@ -515,11 +513,9 @@ static int hfa384x_cmd_callback(struct net_device *dev, u16 cmd, u16 param0,
}
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (entry == NULL) {
- printk(KERN_DEBUG "%s: hfa384x_cmd_callback - kmalloc "
- "failed\n", dev->name);
+ if (entry == NULL)
return -ENOMEM;
- }
+
atomic_set(&entry->usecnt, 1);
entry->type = CMD_CALLBACK;
entry->cmd = cmd;
@@ -2978,11 +2974,9 @@ static int prism2_set_tim(struct net_device *dev, int aid, int set)
local = iface->local;
new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
- if (new_entry == NULL) {
- printk(KERN_DEBUG "%s: prism2_set_tim: kmalloc failed\n",
- local->dev->name);
+ if (new_entry == NULL)
return -ENOMEM;
- }
+
new_entry->aid = aid;
new_entry->set = set;
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index a0e5c21d3657..f0551f807f69 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -298,8 +298,6 @@ static const char *command_types[] = {
};
#endif
-#define WEXT_USECHANNELS 1
-
static const long ipw2100_frequencies[] = {
2412, 2417, 2422, 2427,
2432, 2437, 2442, 2447,
@@ -309,13 +307,6 @@ static const long ipw2100_frequencies[] = {
#define FREQ_COUNT ARRAY_SIZE(ipw2100_frequencies)
-static const long ipw2100_rates_11b[] = {
- 1000000,
- 2000000,
- 5500000,
- 11000000
-};
-
static struct ieee80211_rate ipw2100_bg_rates[] = {
{ .bitrate = 10 },
{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
@@ -323,7 +314,7 @@ static struct ieee80211_rate ipw2100_bg_rates[] = {
{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
};
-#define RATE_COUNT ARRAY_SIZE(ipw2100_rates_11b)
+#define RATE_COUNT ARRAY_SIZE(ipw2100_bg_rates)
/* Pre-decl until we get the code solid and then we can clean it up */
static void ipw2100_tx_send_commands(struct ipw2100_priv *priv);
@@ -3464,11 +3455,8 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
priv->msg_buffers =
kmalloc(IPW_COMMAND_POOL_SIZE * sizeof(struct ipw2100_tx_packet),
GFP_KERNEL);
- if (!priv->msg_buffers) {
- printk(KERN_ERR DRV_NAME ": %s: PCI alloc failed for msg "
- "buffers.\n", priv->net_dev->name);
+ if (!priv->msg_buffers)
return -ENOMEM;
- }
for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
v = pci_alloc_consistent(priv->pci_dev,
@@ -6896,7 +6884,7 @@ static int ipw2100_wx_get_range(struct net_device *dev,
range->num_bitrates = RATE_COUNT;
for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++) {
- range->bitrate[i] = ipw2100_rates_11b[i];
+ range->bitrate[i] = ipw2100_bg_rates[i].bitrate * 100 * 1000;
}
range->min_rts = MIN_RTS_THRESHOLD;
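The RATE_COUNT/bitrate change relies on mac80211 storing struct ieee80211_rate.bitrate in units of 100 kbit/s, while the wireless-extensions range reports bit/s; the 11 Mbit/s entry, stored as 110, therefore becomes 110 * 100 * 1000 = 11,000,000 bit/s. In sketch form (my_wext_bitrate() is hypothetical):

#include <net/mac80211.h>

/* ieee80211_rate.bitrate is in 100 kbit/s units; WEXT wants bit/s. */
static s32 my_wext_bitrate(const struct ieee80211_rate *rate)
{
	return rate->bitrate * 100 * 1000;	/* e.g. 110 -> 11000000 */
}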
@@ -8108,61 +8096,41 @@ static int ipw2100_wx_get_crc_check(struct net_device *dev,
#endif /* CONFIG_IPW2100_MONITOR */
static iw_handler ipw2100_wx_handlers[] = {
- NULL, /* SIOCSIWCOMMIT */
- ipw2100_wx_get_name, /* SIOCGIWNAME */
- NULL, /* SIOCSIWNWID */
- NULL, /* SIOCGIWNWID */
- ipw2100_wx_set_freq, /* SIOCSIWFREQ */
- ipw2100_wx_get_freq, /* SIOCGIWFREQ */
- ipw2100_wx_set_mode, /* SIOCSIWMODE */
- ipw2100_wx_get_mode, /* SIOCGIWMODE */
- NULL, /* SIOCSIWSENS */
- NULL, /* SIOCGIWSENS */
- NULL, /* SIOCSIWRANGE */
- ipw2100_wx_get_range, /* SIOCGIWRANGE */
- NULL, /* SIOCSIWPRIV */
- NULL, /* SIOCGIWPRIV */
- NULL, /* SIOCSIWSTATS */
- NULL, /* SIOCGIWSTATS */
- NULL, /* SIOCSIWSPY */
- NULL, /* SIOCGIWSPY */
- NULL, /* SIOCGIWTHRSPY */
- NULL, /* SIOCWIWTHRSPY */
- ipw2100_wx_set_wap, /* SIOCSIWAP */
- ipw2100_wx_get_wap, /* SIOCGIWAP */
- ipw2100_wx_set_mlme, /* SIOCSIWMLME */
- NULL, /* SIOCGIWAPLIST -- deprecated */
- ipw2100_wx_set_scan, /* SIOCSIWSCAN */
- ipw2100_wx_get_scan, /* SIOCGIWSCAN */
- ipw2100_wx_set_essid, /* SIOCSIWESSID */
- ipw2100_wx_get_essid, /* SIOCGIWESSID */
- ipw2100_wx_set_nick, /* SIOCSIWNICKN */
- ipw2100_wx_get_nick, /* SIOCGIWNICKN */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
- ipw2100_wx_set_rate, /* SIOCSIWRATE */
- ipw2100_wx_get_rate, /* SIOCGIWRATE */
- ipw2100_wx_set_rts, /* SIOCSIWRTS */
- ipw2100_wx_get_rts, /* SIOCGIWRTS */
- ipw2100_wx_set_frag, /* SIOCSIWFRAG */
- ipw2100_wx_get_frag, /* SIOCGIWFRAG */
- ipw2100_wx_set_txpow, /* SIOCSIWTXPOW */
- ipw2100_wx_get_txpow, /* SIOCGIWTXPOW */
- ipw2100_wx_set_retry, /* SIOCSIWRETRY */
- ipw2100_wx_get_retry, /* SIOCGIWRETRY */
- ipw2100_wx_set_encode, /* SIOCSIWENCODE */
- ipw2100_wx_get_encode, /* SIOCGIWENCODE */
- ipw2100_wx_set_power, /* SIOCSIWPOWER */
- ipw2100_wx_get_power, /* SIOCGIWPOWER */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
- ipw2100_wx_set_genie, /* SIOCSIWGENIE */
- ipw2100_wx_get_genie, /* SIOCGIWGENIE */
- ipw2100_wx_set_auth, /* SIOCSIWAUTH */
- ipw2100_wx_get_auth, /* SIOCGIWAUTH */
- ipw2100_wx_set_encodeext, /* SIOCSIWENCODEEXT */
- ipw2100_wx_get_encodeext, /* SIOCGIWENCODEEXT */
- NULL, /* SIOCSIWPMKSA */
+ IW_HANDLER(SIOCGIWNAME, ipw2100_wx_get_name),
+ IW_HANDLER(SIOCSIWFREQ, ipw2100_wx_set_freq),
+ IW_HANDLER(SIOCGIWFREQ, ipw2100_wx_get_freq),
+ IW_HANDLER(SIOCSIWMODE, ipw2100_wx_set_mode),
+ IW_HANDLER(SIOCGIWMODE, ipw2100_wx_get_mode),
+ IW_HANDLER(SIOCGIWRANGE, ipw2100_wx_get_range),
+ IW_HANDLER(SIOCSIWAP, ipw2100_wx_set_wap),
+ IW_HANDLER(SIOCGIWAP, ipw2100_wx_get_wap),
+ IW_HANDLER(SIOCSIWMLME, ipw2100_wx_set_mlme),
+ IW_HANDLER(SIOCSIWSCAN, ipw2100_wx_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, ipw2100_wx_get_scan),
+ IW_HANDLER(SIOCSIWESSID, ipw2100_wx_set_essid),
+ IW_HANDLER(SIOCGIWESSID, ipw2100_wx_get_essid),
+ IW_HANDLER(SIOCSIWNICKN, ipw2100_wx_set_nick),
+ IW_HANDLER(SIOCGIWNICKN, ipw2100_wx_get_nick),
+ IW_HANDLER(SIOCSIWRATE, ipw2100_wx_set_rate),
+ IW_HANDLER(SIOCGIWRATE, ipw2100_wx_get_rate),
+ IW_HANDLER(SIOCSIWRTS, ipw2100_wx_set_rts),
+ IW_HANDLER(SIOCGIWRTS, ipw2100_wx_get_rts),
+ IW_HANDLER(SIOCSIWFRAG, ipw2100_wx_set_frag),
+ IW_HANDLER(SIOCGIWFRAG, ipw2100_wx_get_frag),
+ IW_HANDLER(SIOCSIWTXPOW, ipw2100_wx_set_txpow),
+ IW_HANDLER(SIOCGIWTXPOW, ipw2100_wx_get_txpow),
+ IW_HANDLER(SIOCSIWRETRY, ipw2100_wx_set_retry),
+ IW_HANDLER(SIOCGIWRETRY, ipw2100_wx_get_retry),
+ IW_HANDLER(SIOCSIWENCODE, ipw2100_wx_set_encode),
+ IW_HANDLER(SIOCGIWENCODE, ipw2100_wx_get_encode),
+ IW_HANDLER(SIOCSIWPOWER, ipw2100_wx_set_power),
+ IW_HANDLER(SIOCGIWPOWER, ipw2100_wx_get_power),
+ IW_HANDLER(SIOCSIWGENIE, ipw2100_wx_set_genie),
+ IW_HANDLER(SIOCGIWGENIE, ipw2100_wx_get_genie),
+ IW_HANDLER(SIOCSIWAUTH, ipw2100_wx_set_auth),
+ IW_HANDLER(SIOCGIWAUTH, ipw2100_wx_get_auth),
+ IW_HANDLER(SIOCSIWENCODEEXT, ipw2100_wx_set_encodeext),
+ IW_HANDLER(SIOCGIWENCODEEXT, ipw2100_wx_get_encodeext),
};
#define IPW2100_PRIV_SET_MONITOR SIOCIWFIRSTPRIV
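The handler-table rewrite above drops the fragile positional layout (one NULL per unimplemented ioctl) in favour of IW_HANDLER() entries, which expand, roughly, to designated initializers indexed by the ioctl number, so ordering and holes are handled by the compiler. A small sketch under that assumption, with a made-up handler:

#include <linux/netdevice.h>
#include <net/iw_handler.h>

/* Hypothetical handler using the standard WEXT prototype. */
static int my_wx_get_name(struct net_device *dev, struct iw_request_info *info,
			  union iwreq_data *wrqu, char *extra)
{
	strcpy(wrqu->name, "IEEE 802.11b");
	return 0;
}

/* Unlisted ioctls simply stay NULL; no positional padding is needed. */
static iw_handler my_wx_handlers[] = {
	IW_HANDLER(SIOCGIWNAME, my_wx_get_name),
};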
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index ecb561d7a7a0..570d6fb88967 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -27,8 +27,6 @@
#ifndef __ipw2200_h__
#define __ipw2200_h__
-#define WEXT_USECHANNELS 1
-
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -1999,18 +1997,6 @@ struct ipw_cmd_log {
#define CFG_SYS_ANTENNA_B 0x03 /* force antenna B */
#define CFG_SYS_ANTENNA_SLOW_DIV 0x02 /* consider background noise */
-/*
- * The definitions below were lifted off the ipw2100 driver, which only
- * supports 'b' mode, so I'm sure these are not exactly correct.
- *
- * Somebody fix these!!
- */
-#define REG_MIN_CHANNEL 0
-#define REG_MAX_CHANNEL 14
-
-#define REG_CHANNEL_MASK 0x00003FFF
-#define IPW_IBSS_11B_DEFAULT_MASK 0x87ff
-
#define IPW_MAX_CONFIG_RETRIES 10
#endif /* __ipw2200_h__ */
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index d5ef696298ee..3adb24021a28 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -150,10 +150,9 @@ struct net_device *alloc_libipw(int sizeof_priv, int monitor)
LIBIPW_DEBUG_INFO("Initializing...\n");
dev = alloc_etherdev(sizeof(struct libipw_device) + sizeof_priv);
- if (!dev) {
- LIBIPW_ERROR("Unable to allocate network device.\n");
+ if (!dev)
goto failed;
- }
+
ieee = netdev_priv(dev);
ieee->dev = dev;
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 32a9966c3bf6..c4955d25a19a 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -172,7 +172,7 @@ libipw_rx_frame_mgmt(struct libipw_device *ieee, struct sk_buff *skb,
u16 stype)
{
if (ieee->iw_mode == IW_MODE_MASTER) {
- printk(KERN_DEBUG "%s: Master mode not yet suppported.\n",
+ printk(KERN_DEBUG "%s: Master mode not yet supported.\n",
ieee->dev->name);
return 0;
/*
diff --git a/drivers/net/wireless/iwlegacy/3945-debug.c b/drivers/net/wireless/iwlegacy/3945-debug.c
index 5e1a19fd354d..f767dd106b09 100644
--- a/drivers/net/wireless/iwlegacy/3945-debug.c
+++ b/drivers/net/wireless/iwlegacy/3945-debug.c
@@ -503,3 +503,9 @@ il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
kfree(buf);
return ret;
}
+
+const struct il_debugfs_ops il3945_debugfs_ops = {
+ .rx_stats_read = il3945_ucode_rx_stats_read,
+ .tx_stats_read = il3945_ucode_tx_stats_read,
+ .general_stats_read = il3945_ucode_general_stats_read,
+};
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index a7dfba8d164e..0c1209390169 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -140,7 +140,7 @@ il3945_set_ccmp_dynamic_key_info(struct il_priv *il,
key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- if (sta_id == il->ctx.bcast_sta_id)
+ if (sta_id == il->hw_params.bcast_id)
key_flags |= STA_KEY_MULTICAST_MSK;
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
@@ -341,7 +341,7 @@ il3945_send_beacon_cmd(struct il_priv *il)
return -ENOMEM;
}
- rate = il_get_lowest_plcp(il, &il->ctx);
+ rate = il_get_lowest_plcp(il);
frame_size = il3945_hw_get_beacon_cmd(il, frame, rate);
@@ -512,7 +512,7 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
hdr_len = ieee80211_hdrlen(fc);
/* Find idx into station table for destination station */
- sta_id = il_sta_id_or_broadcast(il, &il->ctx, info->control.sta);
+ sta_id = il_sta_id_or_broadcast(il, info->control.sta);
if (sta_id == IL_INVALID_STATION) {
D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
goto drop;
@@ -538,10 +538,7 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
idx = il_get_cmd_idx(q, q->write_ptr, 0);
- /* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
- txq->txb[q->write_ptr].skb = skb;
- txq->txb[q->write_ptr].ctx = &il->ctx;
+ txq->skbs[q->write_ptr] = skb;
/* Init first empty entry in queue's array of Tx/cmd buffers */
out_cmd = txq->cmd[idx];
@@ -576,7 +573,6 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
len = (u16) skb->len;
tx_cmd->len = cpu_to_le16(len);
- il_dbg_log_tx_data_frame(il, len, hdr);
il_update_stats(il, true, fc, len);
tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
@@ -619,8 +615,7 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
- il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1,
- 0);
+ il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0);
/* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */
@@ -629,8 +624,8 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
phys_addr =
pci_map_single(il->pci_dev, skb->data + hdr_len, len,
PCI_DMA_TODEVICE);
- il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
- len, 0, U32_PAD(len));
+ il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, len, 0,
+ U32_PAD(len));
}
/* Tell device the write idx *just past* this latest filled TFD */
@@ -672,15 +667,13 @@ il3945_get_measurement(struct il_priv *il,
int rc;
int spectrum_resp_status;
int duration = le16_to_cpu(params->duration);
- struct il_rxon_context *ctx = &il->ctx;
if (il_is_associated(il))
add_time =
il_usecs_to_beacons(il,
le64_to_cpu(params->start_time) -
il->_3945.last_tsf,
- le16_to_cpu(ctx->timing.
- beacon_interval));
+ le16_to_cpu(il->timing.beacon_interval));
memset(&spectrum, 0, sizeof(spectrum));
@@ -694,15 +687,14 @@ il3945_get_measurement(struct il_priv *il,
if (il_is_associated(il))
spectrum.start_time =
il_add_beacon_time(il, il->_3945.last_beacon_time, add_time,
- le16_to_cpu(ctx->timing.
- beacon_interval));
+ le16_to_cpu(il->timing.beacon_interval));
else
spectrum.start_time = 0;
spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
spectrum.channels[0].channel = params->channel;
spectrum.channels[0].type = type;
- if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
+ if (il->active.flags & RXON_FLG_BAND_24G_MSK)
spectrum.flags |=
RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
RXON_FLG_TGG_PROTECT_MSK;
@@ -817,16 +809,16 @@ il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
_il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
if (flags & HW_CARD_DISABLED)
- set_bit(S_RF_KILL_HW, &il->status);
+ set_bit(S_RFKILL, &il->status);
else
- clear_bit(S_RF_KILL_HW, &il->status);
+ clear_bit(S_RFKILL, &il->status);
il_scan_cancel(il);
- if ((test_bit(S_RF_KILL_HW, &status) !=
- test_bit(S_RF_KILL_HW, &il->status)))
+ if ((test_bit(S_RFKILL, &status) !=
+ test_bit(S_RFKILL, &il->status)))
wiphy_rfkill_set_hw_state(il->hw->wiphy,
- test_bit(S_RF_KILL_HW, &il->status));
+ test_bit(S_RFKILL, &il->status));
else
wake_up(&il->wait_command_queue);
}
@@ -2150,7 +2142,6 @@ il3945_alive_start(struct il_priv *il)
{
int thermal_spin = 0;
u32 rfkill;
- struct il_rxon_context *ctx = &il->ctx;
D_INFO("Runtime Alive received.\n");
@@ -2175,7 +2166,7 @@ il3945_alive_start(struct il_priv *il)
D_INFO("RFKILL status: 0x%x\n", rfkill);
if (rfkill & 0x1) {
- clear_bit(S_RF_KILL_HW, &il->status);
+ clear_bit(S_RFKILL, &il->status);
/* if RFKILL is not on, then wait for thermal
* sensor in adapter to kick in */
while (il3945_hw_get_temperature(il) == 0) {
@@ -2187,7 +2178,7 @@ il3945_alive_start(struct il_priv *il)
D_INFO("Thermal calibration took %dus\n",
thermal_spin * 10);
} else
- set_bit(S_RF_KILL_HW, &il->status);
+ set_bit(S_RFKILL, &il->status);
/* After the ALIVE response, we can send commands to 3945 uCode */
set_bit(S_ALIVE, &il->status);
@@ -2206,13 +2197,13 @@ il3945_alive_start(struct il_priv *il)
if (il_is_associated(il)) {
struct il3945_rxon_cmd *active_rxon =
- (struct il3945_rxon_cmd *)(&ctx->active);
+ (struct il3945_rxon_cmd *)(&il->active);
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
} else {
/* Initialize our rx_config data */
- il_connection_init_rx_config(il, ctx);
+ il_connection_init_rx_config(il);
}
/* Configure Bluetooth device coexistence support */
@@ -2221,7 +2212,7 @@ il3945_alive_start(struct il_priv *il)
set_bit(S_READY, &il->status);
/* Configure the adapter for unassociated operation */
- il3945_commit_rxon(il, ctx);
+ il3945_commit_rxon(il);
il3945_reg_txpower_periodic(il);
@@ -2253,7 +2244,7 @@ __il3945_down(struct il_priv *il)
del_timer_sync(&il->watchdog);
/* Station information will now be cleared in device */
- il_clear_ucode_stations(il, NULL);
+ il_clear_ucode_stations(il);
il_dealloc_bcast_stations(il);
il_clear_driver_stations(il);
@@ -2281,12 +2272,8 @@ __il3945_down(struct il_priv *il)
* clear all bits but the RF Kill bits and return */
if (!il_is_init(il)) {
il->status =
- test_bit(S_RF_KILL_HW,
- &il->
- status) << S_RF_KILL_HW |
- test_bit(S_GEO_CONFIGURED,
- &il->
- status) << S_GEO_CONFIGURED |
+ test_bit(S_RFKILL, &il->status) << S_RFKILL |
+ test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
goto exit;
}
@@ -2294,25 +2281,30 @@ __il3945_down(struct il_priv *il)
/* ...otherwise clear out all the status bits but the RF Kill
* bit and continue taking the NIC down. */
il->status &=
- test_bit(S_RF_KILL_HW,
- &il->status) << S_RF_KILL_HW | test_bit(S_GEO_CONFIGURED,
- &il->
- status) <<
- S_GEO_CONFIGURED | test_bit(S_FW_ERROR,
- &il->
- status) << S_FW_ERROR |
+ test_bit(S_RFKILL, &il->status) << S_RFKILL |
+ test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+ test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+ /*
+	 * Interrupts are disabled and synchronized, and priv->mutex is held, so
+	 * this is the only thread that will program device registers; lockdep
+	 * assertions still apply, so we take reg_lock.
+ */
+ spin_lock_irq(&il->reg_lock);
+ /* FIXME: il_grab_nic_access if rfkill is off ? */
+
il3945_hw_txq_ctx_stop(il);
il3945_hw_rxq_stop(il);
-
/* Power-down device's busmaster DMA clocks */
- il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ _il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
udelay(5);
-
/* Stop the device, and put it in low power state */
- il_apm_stop(il);
+ _il_apm_stop(il);
+
+ spin_unlock_irq(&il->reg_lock);
+ il3945_hw_txq_ctx_free(il);
exit:
memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
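The status handling above preserves only a handful of bits across the shutdown path by rebuilding them from test_bit() results shifted back into place; with the BIT() macro the same effect can be written as a plain mask. A hypothetical equivalent (my_trim_status() and keep_mask are not driver code):

#include <linux/bitops.h>

static void my_trim_status(struct il_priv *il)
{
	/* Keep only the bits that must survive the shutdown path. */
	unsigned long keep_mask = BIT(S_RFKILL) | BIT(S_GEO_CONFIGURED) |
				  BIT(S_FW_ERROR) | BIT(S_EXIT_PENDING);

	il->status &= keep_mask;
}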
@@ -2339,12 +2331,11 @@ il3945_down(struct il_priv *il)
static int
il3945_alloc_bcast_station(struct il_priv *il)
{
- struct il_rxon_context *ctx = &il->ctx;
unsigned long flags;
u8 sta_id;
spin_lock_irqsave(&il->sta_lock, flags);
- sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
+ sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
if (sta_id == IL_INVALID_STATION) {
IL_ERR("Unable to prepare broadcast station\n");
spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -2380,9 +2371,9 @@ __il3945_up(struct il_priv *il)
/* If platform's RF_KILL switch is NOT set to KILL */
if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(S_RF_KILL_HW, &il->status);
+ clear_bit(S_RFKILL, &il->status);
else {
- set_bit(S_RF_KILL_HW, &il->status);
+ set_bit(S_RFKILL, &il->status);
IL_WARN("Radio disabled by HW RF Kill switch\n");
return -ENODEV;
}
@@ -2414,7 +2405,7 @@ __il3945_up(struct il_priv *il)
il->ucode_data.len);
/* We return success when we resume from suspend and rf_kill is on. */
- if (test_bit(S_RF_KILL_HW, &il->status))
+ if (test_bit(S_RFKILL, &il->status))
return 0;
for (i = 0; i < MAX_HW_RESTARTS; i++) {
@@ -2422,7 +2413,7 @@ __il3945_up(struct il_priv *il)
/* load bootstrap state machine,
* load bootstrap program into processor's memory,
* prepare to load the "initialize" uCode */
- rc = il->cfg->ops->lib->load_ucode(il);
+ rc = il->ops->load_ucode(il);
if (rc) {
IL_ERR("Unable to set up bootstrap uCode: %d\n", rc);
@@ -2494,15 +2485,15 @@ il3945_rfkill_poll(struct work_struct *data)
{
struct il_priv *il =
container_of(data, struct il_priv, _3945.rfkill_poll.work);
- bool old_rfkill = test_bit(S_RF_KILL_HW, &il->status);
+ bool old_rfkill = test_bit(S_RFKILL, &il->status);
bool new_rfkill =
!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
if (new_rfkill != old_rfkill) {
if (new_rfkill)
- set_bit(S_RF_KILL_HW, &il->status);
+ set_bit(S_RFKILL, &il->status);
else
- clear_bit(S_RF_KILL_HW, &il->status);
+ clear_bit(S_RFKILL, &il->status);
wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill);
@@ -2602,7 +2593,7 @@ il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
/* We don't build a direct scan probe request; the uCode will do
* that based on the direct_mask added to each channel entry */
scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
- scan->tx_cmd.sta_id = il->ctx.bcast_sta_id;
+ scan->tx_cmd.sta_id = il->hw_params.bcast_id;
scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
/* flags + rate selection */
@@ -2664,14 +2655,12 @@ il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
void
il3945_post_scan(struct il_priv *il)
{
- struct il_rxon_context *ctx = &il->ctx;
-
/*
* Since setting the RXON may have been deferred while
* performing the scan, fire one off if needed
*/
- if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
- il3945_commit_rxon(il, ctx);
+ if (memcmp(&il->staging, &il->active, sizeof(il->staging)))
+ il3945_commit_rxon(il);
}
static void
@@ -2684,7 +2673,8 @@ il3945_bg_restart(struct work_struct *data)
if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
mutex_lock(&il->mutex);
- il->ctx.vif = NULL;
+ /* FIXME: vif can be dereferenced */
+ il->vif = NULL;
il->is_open = 0;
mutex_unlock(&il->mutex);
il3945_down(il);
@@ -2722,13 +2712,12 @@ il3945_post_associate(struct il_priv *il)
{
int rc = 0;
struct ieee80211_conf *conf = NULL;
- struct il_rxon_context *ctx = &il->ctx;
- if (!ctx->vif || !il->is_open)
+ if (!il->vif || !il->is_open)
return;
- D_ASSOC("Associated as %d to: %pM\n", ctx->vif->bss_conf.aid,
- ctx->active.bssid_addr);
+ D_ASSOC("Associated as %d to: %pM\n", il->vif->bss_conf.aid,
+ il->active.bssid_addr);
if (test_bit(S_EXIT_PENDING, &il->status))
return;
@@ -2737,35 +2726,35 @@ il3945_post_associate(struct il_priv *il)
conf = &il->hw->conf;
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- il3945_commit_rxon(il, ctx);
+ il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il3945_commit_rxon(il);
- rc = il_send_rxon_timing(il, ctx);
+ rc = il_send_rxon_timing(il);
if (rc)
IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n");
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
+ il->staging.assoc_id = cpu_to_le16(il->vif->bss_conf.aid);
- D_ASSOC("assoc id %d beacon interval %d\n", ctx->vif->bss_conf.aid,
- ctx->vif->bss_conf.beacon_int);
+ D_ASSOC("assoc id %d beacon interval %d\n", il->vif->bss_conf.aid,
+ il->vif->bss_conf.beacon_int);
- if (ctx->vif->bss_conf.use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ if (il->vif->bss_conf.use_short_preamble)
+ il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (ctx->vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (il->vif->bss_conf.use_short_slot)
+ il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
}
- il3945_commit_rxon(il, ctx);
+ il3945_commit_rxon(il);
- switch (ctx->vif->type) {
+ switch (il->vif->type) {
case NL80211_IFTYPE_STATION:
il3945_rate_scale_init(il->hw, IL_AP_ID);
break;
@@ -2774,7 +2763,7 @@ il3945_post_associate(struct il_priv *il)
break;
default:
IL_ERR("%s Should not be called in %d mode\n", __func__,
- ctx->vif->type);
+ il->vif->type);
break;
}
}
@@ -2793,10 +2782,9 @@ il3945_mac_start(struct ieee80211_hw *hw)
struct il_priv *il = hw->priv;
int ret;
- D_MAC80211("enter\n");
-
/* we should be verifying the device is ready to be opened */
mutex_lock(&il->mutex);
+ D_MAC80211("enter\n");
/* fetch ucode file from disk, alloc and copy to bus-master buffers ...
* ucode filename and max sizes are card-specific. */
@@ -2891,8 +2879,7 @@ il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
void
il3945_config_ap(struct il_priv *il)
{
- struct il_rxon_context *ctx = &il->ctx;
- struct ieee80211_vif *vif = ctx->vif;
+ struct ieee80211_vif *vif = il->vif;
int rc = 0;
if (test_bit(S_EXIT_PENDING, &il->status))
@@ -2902,31 +2889,31 @@ il3945_config_ap(struct il_priv *il)
if (!(il_is_associated(il))) {
/* RXON - unassoc (to set timing command) */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- il3945_commit_rxon(il, ctx);
+ il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il3945_commit_rxon(il);
/* RXON Timing */
- rc = il_send_rxon_timing(il, ctx);
+ rc = il_send_rxon_timing(il);
if (rc)
IL_WARN("C_RXON_TIMING failed - "
"Attempting to continue.\n");
- ctx->staging.assoc_id = 0;
+ il->staging.assoc_id = 0;
if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
}
/* restore RXON assoc */
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- il3945_commit_rxon(il, ctx);
+ il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ il3945_commit_rxon(il);
}
il3945_send_beacon_cmd(il);
}
@@ -2953,15 +2940,19 @@ il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
* hardware will then not attempt to decrypt the frames.
*/
if (vif->type == NL80211_IFTYPE_ADHOC &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ D_MAC80211("leave - IBSS RSN\n");
return -EOPNOTSUPP;
+ }
static_key = !il_is_associated(il);
if (!static_key) {
- sta_id = il_sta_id_or_broadcast(il, &il->ctx, sta);
- if (sta_id == IL_INVALID_STATION)
+ sta_id = il_sta_id_or_broadcast(il, sta);
+ if (sta_id == IL_INVALID_STATION) {
+ D_MAC80211("leave - station not found\n");
return -EINVAL;
+ }
}
mutex_lock(&il->mutex);
@@ -2986,8 +2977,8 @@ il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = -EINVAL;
}
+ D_MAC80211("leave ret %d\n", ret);
mutex_unlock(&il->mutex);
- D_MAC80211("leave\n");
return ret;
}
@@ -3002,13 +2993,11 @@ il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
bool is_ap = vif->type == NL80211_IFTYPE_STATION;
u8 sta_id;
- D_INFO("received request to add station %pM\n", sta->addr);
mutex_lock(&il->mutex);
- D_INFO("proceeding to add station %pM\n", sta->addr);
+ D_INFO("station %pM\n", sta->addr);
sta_priv->common.sta_id = IL_INVALID_STATION;
- ret =
- il_add_station_common(il, &il->ctx, sta->addr, is_ap, sta, &sta_id);
+ ret = il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
if (ret) {
IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
/* Should we return success if return code is EEXIST ? */
@@ -3032,7 +3021,6 @@ il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
{
struct il_priv *il = hw->priv;
__le32 filter_or = 0, filter_nand = 0;
- struct il_rxon_context *ctx = &il->ctx;
#define CHK(test, flag) do { \
if (*total_flags & (test)) \
@@ -3052,8 +3040,8 @@ il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
mutex_lock(&il->mutex);
- ctx->staging.filter_flags &= ~filter_nand;
- ctx->staging.filter_flags |= filter_or;
+ il->staging.filter_flags &= ~filter_nand;
+ il->staging.filter_flags |= filter_or;
/*
* Not committing directly because hardware can perform a scan,
@@ -3112,11 +3100,9 @@ il3945_store_debug_level(struct device *d, struct device_attribute *attr,
ret = strict_strtoul(buf, 0, &val);
if (ret)
IL_INFO("%s is not in hex or decimal form.\n", buf);
- else {
+ else
il->debug_level = val;
- if (il_alloc_traffic_mem(il))
- IL_ERR("Not enough memory to generate traffic log\n");
- }
+
return strnlen(buf, count);
}
@@ -3170,9 +3156,8 @@ static ssize_t
il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf)
{
struct il_priv *il = dev_get_drvdata(d);
- struct il_rxon_context *ctx = &il->ctx;
- return sprintf(buf, "0x%04X\n", ctx->active.flags);
+ return sprintf(buf, "0x%04X\n", il->active.flags);
}
static ssize_t
@@ -3181,17 +3166,16 @@ il3945_store_flags(struct device *d, struct device_attribute *attr,
{
struct il_priv *il = dev_get_drvdata(d);
u32 flags = simple_strtoul(buf, NULL, 0);
- struct il_rxon_context *ctx = &il->ctx;
mutex_lock(&il->mutex);
- if (le32_to_cpu(ctx->staging.flags) != flags) {
+ if (le32_to_cpu(il->staging.flags) != flags) {
/* Cancel any currently running scans... */
if (il_scan_cancel_timeout(il, 100))
IL_WARN("Could not cancel scan.\n");
else {
D_INFO("Committing rxon.flags = 0x%04X\n", flags);
- ctx->staging.flags = cpu_to_le32(flags);
- il3945_commit_rxon(il, ctx);
+ il->staging.flags = cpu_to_le32(flags);
+ il3945_commit_rxon(il);
}
}
mutex_unlock(&il->mutex);
@@ -3207,9 +3191,8 @@ il3945_show_filter_flags(struct device *d, struct device_attribute *attr,
char *buf)
{
struct il_priv *il = dev_get_drvdata(d);
- struct il_rxon_context *ctx = &il->ctx;
- return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.filter_flags));
+ return sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.filter_flags));
}
static ssize_t
@@ -3217,19 +3200,18 @@ il3945_store_filter_flags(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct il_priv *il = dev_get_drvdata(d);
- struct il_rxon_context *ctx = &il->ctx;
u32 filter_flags = simple_strtoul(buf, NULL, 0);
mutex_lock(&il->mutex);
- if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
+ if (le32_to_cpu(il->staging.filter_flags) != filter_flags) {
/* Cancel any currently running scans... */
if (il_scan_cancel_timeout(il, 100))
IL_WARN("Could not cancel scan.\n");
else {
D_INFO("Committing rxon.filter_flags = " "0x%04X\n",
filter_flags);
- ctx->staging.filter_flags = cpu_to_le32(filter_flags);
- il3945_commit_rxon(il, ctx);
+ il->staging.filter_flags = cpu_to_le32(filter_flags);
+ il3945_commit_rxon(il);
}
}
mutex_unlock(&il->mutex);
@@ -3278,9 +3260,8 @@ il3945_store_measurement(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct il_priv *il = dev_get_drvdata(d);
- struct il_rxon_context *ctx = &il->ctx;
struct ieee80211_measurement_params params = {
- .channel = le16_to_cpu(ctx->active.channel),
+ .channel = le16_to_cpu(il->active.channel),
.start_time = cpu_to_le64(il->_3945.last_tsf),
.duration = cpu_to_le16(1),
};
@@ -3474,7 +3455,7 @@ static struct attribute_group il3945_attribute_group = {
.attrs = il3945_sysfs_entries,
};
-struct ieee80211_ops il3945_hw_ops = {
+struct ieee80211_ops il3945_mac_ops = {
.tx = il3945_mac_tx,
.start = il3945_mac_start,
.stop = il3945_mac_stop,
@@ -3567,7 +3548,8 @@ il3945_setup_mac(struct il_priv *il)
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT;
- hw->wiphy->interface_modes = il->ctx.interface_modes;
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->flags |=
WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
@@ -3614,50 +3596,35 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* 1. Allocating HW data
* ********************/
- /* mac80211 allocates memory for this device instance, including
- * space for this driver's ilate structure */
- hw = il_alloc_all(cfg);
- if (hw == NULL) {
- pr_err("Can not allocate network device\n");
+ hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il3945_mac_ops);
+ if (!hw) {
err = -ENOMEM;
goto out;
}
il = hw->priv;
+ il->hw = hw;
SET_IEEE80211_DEV(hw, &pdev->dev);
il->cmd_queue = IL39_CMD_QUEUE_NUM;
- il->ctx.ctxid = 0;
-
- il->ctx.rxon_cmd = C_RXON;
- il->ctx.rxon_timing_cmd = C_RXON_TIMING;
- il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
- il->ctx.qos_cmd = C_QOS_PARAM;
- il->ctx.ap_sta_id = IL_AP_ID;
- il->ctx.wep_key_cmd = C_WEPKEY;
- il->ctx.interface_modes =
- BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
- il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
- il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
- il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
-
/*
* Disabling hardware scan means that mac80211 will perform scans
* "the hard way", rather than using device's scan.
*/
if (il3945_mod_params.disable_hw_scan) {
D_INFO("Disabling hw_scan\n");
- il3945_hw_ops.hw_scan = NULL;
+ il3945_mac_ops.hw_scan = NULL;
}
D_INFO("*** LOAD DRIVER ***\n");
il->cfg = cfg;
+ il->ops = &il3945_ops;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ il->debugfs_ops = &il3945_debugfs_ops;
+#endif
il->pci_dev = pdev;
il->inta_mask = CSR_INI_SET_MASK;
- if (il_alloc_traffic_mem(il))
- IL_ERR("Not enough memory to generate traffic log\n");
-
/***************************
* 2. Initializing PCI bus
* *************************/
@@ -3688,7 +3655,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/***********************
* 3. Read REV Register
* ********************/
- il->hw_base = pci_iomap(pdev, 0, 0);
+ il->hw_base = pci_ioremap_bar(pdev, 0);
if (!il->hw_base) {
err = -ENODEV;
goto out_pci_release_regions;
@@ -3702,7 +3669,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, 0x41, 0x00);
- /* these spin locks will be used in apm_ops.init and EEPROM access
+ /* these spin locks will be used in apm_init and EEPROM access
* we should init now
*/
spin_lock_init(&il->reg_lock);
@@ -3773,8 +3740,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_release_irq;
}
- il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5],
- &il->ctx);
+ il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5]);
il3945_setup_deferred_work(il);
il3945_setup_handlers(il);
il_power_initialize(il);
@@ -3814,14 +3780,13 @@ out_unset_hw_params:
out_eeprom_free:
il_eeprom_free(il);
out_iounmap:
- pci_iounmap(pdev, il->hw_base);
+ iounmap(il->hw_base);
out_pci_release_regions:
pci_release_regions(pdev);
out_pci_disable_device:
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
out_ieee80211_free_hw:
- il_free_traffic_mem(il);
ieee80211_free_hw(il->hw);
out:
return err;
@@ -3889,12 +3854,11 @@ il3945_pci_remove(struct pci_dev *pdev)
* until now... */
destroy_workqueue(il->workqueue);
il->workqueue = NULL;
- il_free_traffic_mem(il);
free_irq(pdev->irq, il);
pci_disable_msi(pdev);
- pci_iounmap(pdev, il->hw_base);
+ iounmap(il->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
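
The 3945-mac.c hunks above converge on a single RXON context kept directly in the device structure, so the staging/active pair is reached through one pointer and helpers such as il3945_commit_rxon() drop their context argument. A minimal sketch of that pattern, using simplified stand-in types rather than the real il_priv and il3945_rxon_cmd layouts:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Illustrative stand-ins only: not the real RXON command or
     * device-private structure from the driver.
     */
    struct rxon_cmd {
            uint32_t flags;
            uint32_t filter_flags;
            uint16_t channel;
    };

    struct device_priv {
            struct rxon_cmd staging;        /* configuration to apply next */
            struct rxon_cmd active;         /* configuration the device runs now */
    };

    /* With a single context, the commit helper needs only the device pointer. */
    static int commit_rxon(struct device_priv *il)
    {
            if (!memcmp(&il->staging, &il->active, sizeof(il->staging)))
                    return 0;               /* nothing changed, nothing to send */

            /* ... hand il->staging to the firmware here ... */
            memcpy(&il->active, &il->staging, sizeof(il->active));
            return 0;
    }

    int main(void)
    {
            struct device_priv il;

            memset(&il, 0, sizeof(il));
            il.staging.channel = 6;
            il.staging.flags |= 0x1;        /* e.g. a short-preamble style bit */
            commit_rxon(&il);
            printf("active: channel %u, flags 0x%x\n", il.active.channel,
                   (unsigned int)il.active.flags);
            return 0;
    }

Keeping one staging/active pair per device is what lets every caller in the hunks above pass just il instead of il plus a context.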
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index d7a83f229190..70bee1a4d876 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -342,7 +342,7 @@ il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
int i;
D_INFO("enter\n");
- if (sta_id == il->ctx.bcast_sta_id)
+ if (sta_id == il->hw_params.bcast_id)
goto out;
psta = (struct il3945_sta_priv *)sta->drv_priv;
@@ -927,8 +927,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
rcu_read_lock();
- sta =
- ieee80211_find_sta(il->ctx.vif, il->stations[sta_id].sta.sta.addr);
+ sta = ieee80211_find_sta(il->vif, il->stations[sta_id].sta.sta.addr);
if (!sta) {
D_RATE("Unable to find station to initialize rate scaling.\n");
rcu_read_unlock();
@@ -944,7 +943,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
switch (il->band) {
case IEEE80211_BAND_2GHZ:
/* TODO: this always does G, not a regression */
- if (il->ctx.active.flags & RXON_FLG_TGG_PROTECT_MSK) {
+ if (il->active.flags & RXON_FLG_TGG_PROTECT_MSK) {
rs_sta->tgg = 1;
rs_sta->expected_tpt = il3945_expected_tpt_g_prot;
} else
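
In the 3945-rs.c hunk the reserved broadcast entry is now located through il->hw_params.bcast_id rather than a per-context field, and per-station setup simply skips that slot. A small sketch of the idea, with illustrative names only:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* Stand-in names; the real driver keeps bcast_id in its hw_params block. */
    #define MAX_STATIONS    8

    struct hw_params {
            uint8_t bcast_id;       /* index of the reserved broadcast slot */
    };

    struct device_priv {
            struct hw_params hw_params;
    };

    /* Per-station rate-scaling setup skips the broadcast slot entirely. */
    static void rate_init(struct device_priv *il, uint8_t sta_id)
    {
            if (sta_id == il->hw_params.bcast_id)
                    return;         /* broadcast entry: nothing to initialize */

            printf("init rate scaling for station %u\n", sta_id);
    }

    int main(void)
    {
            struct device_priv il;
            uint8_t id;

            memset(&il, 0, sizeof(il));
            il.hw_params.bcast_id = MAX_STATIONS - 1;

            for (id = 0; id < MAX_STATIONS; id++)
                    rate_init(&il, id);
            return 0;
    }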
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index c80eb9b31551..c5b1d199e0bc 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -57,10 +57,6 @@ il3945_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
return il_send_cmd(il, &cmd);
}
-const struct il_led_ops il3945_led_ops = {
- .cmd = il3945_send_led_cmd,
-};
-
#define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
[RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \
RATE_##r##M_IEEE, \
@@ -293,17 +289,17 @@ il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
{
struct il_tx_queue *txq = &il->txq[txq_id];
struct il_queue *q = &txq->q;
- struct il_tx_info *tx_info;
+ struct sk_buff *skb;
BUG_ON(txq_id == IL39_CMD_QUEUE_NUM);
for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
- tx_info = &txq->txb[txq->q.read_ptr];
- ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
- tx_info->skb = NULL;
- il->cfg->ops->lib->txq_free_tfd(il, txq);
+ skb = txq->skbs[txq->q.read_ptr];
+ ieee80211_tx_status_irqsafe(il->hw, skb);
+ txq->skbs[txq->q.read_ptr] = NULL;
+ il->ops->txq_free_tfd(il, txq);
}
if (il_queue_space(q) > q->low_mark && txq_id >= 0 &&
@@ -336,7 +332,7 @@ il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
}
txq->time_stamp = jiffies;
- info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
+ info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]);
ieee80211_tx_info_clear_status(info);
/* Fill the MRR chain with some info about on-chip retransmissions */
@@ -577,8 +573,6 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
network_packet ? '*' : ' ', le16_to_cpu(rx_hdr->channel),
rx_status.signal, rx_status.signal, rx_status.rate_idx);
- il_dbg_log_rx_data_frame(il, le16_to_cpu(rx_hdr->len), header);
-
if (network_packet) {
il->_3945.last_beacon_time =
le32_to_cpu(rx_end->beacon_timestamp);
@@ -660,15 +654,13 @@ il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
PCI_DMA_TODEVICE);
/* free SKB */
- if (txq->txb) {
- struct sk_buff *skb;
-
- skb = txq->txb[txq->q.read_ptr].skb;
+ if (txq->skbs) {
+ struct sk_buff *skb = txq->skbs[txq->q.read_ptr];
/* can be called from irqs-disabled context */
if (skb) {
dev_kfree_skb_any(skb);
- txq->txb[txq->q.read_ptr].skb = NULL;
+ txq->skbs[txq->q.read_ptr] = NULL;
}
}
}
@@ -798,7 +790,6 @@ il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
static int
il3945_tx_reset(struct il_priv *il)
{
-
/* bypass mode */
il_wr_prph(il, ALM_SCD_MODE_REG, 0x2);
@@ -835,8 +826,7 @@ il3945_tx_reset(struct il_priv *il)
static int
il3945_txq_ctx_reset(struct il_priv *il)
{
- int rc;
- int txq_id, slots_num;
+ int rc, txq_id;
il3945_hw_txq_ctx_free(il);
@@ -852,10 +842,7 @@ il3945_txq_ctx_reset(struct il_priv *il)
/* Tx queue(s) */
for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
- slots_num =
- (txq_id ==
- IL39_CMD_QUEUE_NUM) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- rc = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
+ rc = il_tx_queue_init(il, txq_id);
if (rc) {
IL_ERR("Tx %d queue init failed\n", txq_id);
goto error;
@@ -960,12 +947,11 @@ il3945_hw_nic_init(struct il_priv *il)
struct il_rx_queue *rxq = &il->rxq;
spin_lock_irqsave(&il->lock, flags);
- il->cfg->ops->lib->apm_ops.init(il);
+ il3945_apm_init(il);
spin_unlock_irqrestore(&il->lock, flags);
il3945_set_pwr_vmain(il);
-
- il->cfg->ops->lib->apm_ops.config(il);
+ il3945_nic_config(il);
/* Allocate the RX queue, or reset if it is already allocated */
if (!rxq->bd) {
@@ -1016,7 +1002,7 @@ il3945_hw_txq_ctx_free(struct il_priv *il)
il_tx_queue_free(il, txq_id);
/* free tx queue structure */
- il_txq_mem(il);
+ il_free_txq_mem(il);
}
void
@@ -1025,18 +1011,17 @@ il3945_hw_txq_ctx_stop(struct il_priv *il)
int txq_id;
/* stop SCD */
- il_wr_prph(il, ALM_SCD_MODE_REG, 0);
- il_wr_prph(il, ALM_SCD_TXFACT_REG, 0);
+ _il_wr_prph(il, ALM_SCD_MODE_REG, 0);
+ _il_wr_prph(il, ALM_SCD_TXFACT_REG, 0);
/* reset TFD queues */
for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
- il_wr(il, FH39_TCSR_CONFIG(txq_id), 0x0);
- il_poll_bit(il, FH39_TSSR_TX_STATUS,
- FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
- 1000);
+ _il_wr(il, FH39_TCSR_CONFIG(txq_id), 0x0);
+ _il_poll_bit(il, FH39_TSSR_TX_STATUS,
+ FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
+ FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
+ 1000);
}
-
- il3945_hw_txq_ctx_free(il);
}
/**
@@ -1388,7 +1373,7 @@ il3945_send_tx_power(struct il_priv *il)
int rate_idx, i;
const struct il_channel_info *ch_info = NULL;
struct il3945_txpowertable_cmd txpower = {
- .channel = il->ctx.active.channel,
+ .channel = il->active.channel,
};
u16 chan;
@@ -1397,7 +1382,7 @@ il3945_send_tx_power(struct il_priv *il)
"TX Power requested while scanning!\n"))
return -EAGAIN;
- chan = le16_to_cpu(il->ctx.active.channel);
+ chan = le16_to_cpu(il->active.channel);
txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
ch_info = il_get_channel_info(il, il->band, chan);
@@ -1615,7 +1600,7 @@ il3945_hw_reg_comp_txpower_temp(struct il_priv *il)
}
/* send Txpower command for current channel to ucode */
- return il->cfg->ops->lib->send_tx_power(il);
+ return il->ops->send_tx_power(il);
}
int
@@ -1662,7 +1647,7 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
}
static int
-il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+il3945_send_rxon_assoc(struct il_priv *il)
{
int rc = 0;
struct il_rx_pkt *pkt;
@@ -1673,8 +1658,8 @@ il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
.flags = CMD_WANT_SKB,
.data = &rxon_assoc,
};
- const struct il_rxon_cmd *rxon1 = &ctx->staging;
- const struct il_rxon_cmd *rxon2 = &ctx->active;
+ const struct il_rxon_cmd *rxon1 = &il->staging;
+ const struct il_rxon_cmd *rxon2 = &il->active;
if (rxon1->flags == rxon2->flags &&
rxon1->filter_flags == rxon2->filter_flags &&
@@ -1684,10 +1669,10 @@ il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
return 0;
}
- rxon_assoc.flags = ctx->staging.flags;
- rxon_assoc.filter_flags = ctx->staging.filter_flags;
- rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
- rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+ rxon_assoc.flags = il->staging.flags;
+ rxon_assoc.filter_flags = il->staging.filter_flags;
+ rxon_assoc.ofdm_basic_rates = il->staging.ofdm_basic_rates;
+ rxon_assoc.cck_basic_rates = il->staging.cck_basic_rates;
rxon_assoc.reserved = 0;
rc = il_send_cmd_sync(il, &cmd);
@@ -1714,11 +1699,11 @@ il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
* a HW tune is required based on the RXON structure changes.
*/
int
-il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+il3945_commit_rxon(struct il_priv *il)
{
/* cast away the const for active_rxon in this function */
- struct il3945_rxon_cmd *active_rxon = (void *)&ctx->active;
- struct il3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
+ struct il3945_rxon_cmd *active_rxon = (void *)&il->active;
+ struct il3945_rxon_cmd *staging_rxon = (void *)&il->staging;
int rc = 0;
bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
@@ -1735,7 +1720,7 @@ il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
staging_rxon->flags |= il3945_get_antenna_flags(il);
- rc = il_check_rxon_cmd(il, ctx);
+ rc = il_check_rxon_cmd(il);
if (rc) {
IL_ERR("Invalid RXON configuration. Not committing.\n");
return -EINVAL;
@@ -1744,8 +1729,8 @@ il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
/* If we don't need to send a full RXON, we can use
* il3945_rxon_assoc_cmd which is used to reconfigure filter
* and other flags for the current radio configuration. */
- if (!il_full_rxon_required(il, &il->ctx)) {
- rc = il_send_rxon_assoc(il, &il->ctx);
+ if (!il_full_rxon_required(il)) {
+ rc = il_send_rxon_assoc(il);
if (rc) {
IL_ERR("Error setting RXON_ASSOC "
"configuration (%d).\n", rc);
@@ -1776,7 +1761,7 @@ il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
active_rxon->reserved4 = 0;
active_rxon->reserved5 = 0;
rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
- &il->ctx.active);
+ &il->active);
/* If the mask clearing failed then we set
* active_rxon back to what it was previously */
@@ -1786,8 +1771,8 @@ il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
"configuration (%d).\n", rc);
return rc;
}
- il_clear_ucode_stations(il, &il->ctx);
- il_restore_stations(il, &il->ctx);
+ il_clear_ucode_stations(il);
+ il_restore_stations(il);
}
D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
@@ -1801,7 +1786,7 @@ il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
staging_rxon->reserved4 = 0;
staging_rxon->reserved5 = 0;
- il_set_rxon_hwcrypto(il, ctx, !il3945_mod_params.sw_crypto);
+ il_set_rxon_hwcrypto(il, !il3945_mod_params.sw_crypto);
/* Apply the new configuration */
rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
@@ -1814,8 +1799,8 @@ il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
if (!new_assoc) {
- il_clear_ucode_stations(il, &il->ctx);
- il_restore_stations(il, &il->ctx);
+ il_clear_ucode_stations(il);
+ il_restore_stations(il);
}
/* If we issue a new RXON command which required a tune then we must
@@ -2186,12 +2171,14 @@ il3945_txpower_set_from_eeprom(struct il_priv *il)
int
il3945_hw_rxq_stop(struct il_priv *il)
{
- int rc;
+ int ret;
- il_wr(il, FH39_RCSR_CONFIG(0), 0);
- rc = il_poll_bit(il, FH39_RSSR_STATUS,
- FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
- if (rc < 0)
+ _il_wr(il, FH39_RCSR_CONFIG(0), 0);
+ ret = _il_poll_bit(il, FH39_RSSR_STATUS,
+ FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
+ FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
+ 1000);
+ if (ret < 0)
IL_ERR("Can't stop Rx DMA.\n");
return 0;
@@ -2259,7 +2246,6 @@ il3945_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
static int
il3945_add_bssid_station(struct il_priv *il, const u8 * addr, u8 * sta_id_r)
{
- struct il_rxon_context *ctx = &il->ctx;
int ret;
u8 sta_id;
unsigned long flags;
@@ -2267,7 +2253,7 @@ il3945_add_bssid_station(struct il_priv *il, const u8 * addr, u8 * sta_id_r)
if (sta_id_r)
*sta_id_r = IL_INVALID_STATION;
- ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
+ ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
if (ret) {
IL_ERR("Unable to add station %pM\n", addr);
return ret;
@@ -2397,15 +2383,16 @@ il3945_hw_set_hw_params(struct il_priv *il)
return -ENOMEM;
}
+ il->hw_params.bcast_id = IL3945_BROADCAST_ID;
+
/* Assign number of Usable TX queues */
- il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
+ il->hw_params.max_txq_num = il->cfg->num_of_queues;
il->hw_params.tfd_size = sizeof(struct il3945_tfd);
il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_3K);
il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
il->hw_params.max_stations = IL3945_STATION_COUNT;
- il->ctx.bcast_sta_id = IL3945_BROADCAST_ID;
il->sta_key_max_num = STA_KEY_MAX_NUM;
@@ -2426,7 +2413,7 @@ il3945_hw_get_beacon_cmd(struct il_priv *il, struct il3945_frame *frame,
tx_beacon_cmd = (struct il3945_tx_beacon_cmd *)&frame->u;
memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
- tx_beacon_cmd->tx.sta_id = il->ctx.bcast_sta_id;
+ tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
frame_size =
@@ -2633,76 +2620,31 @@ il3945_load_bsm(struct il_priv *il)
return 0;
}
-static struct il_hcmd_ops il3945_hcmd = {
- .rxon_assoc = il3945_send_rxon_assoc,
- .commit_rxon = il3945_commit_rxon,
-};
-
-static struct il_lib_ops il3945_lib = {
+const struct il_ops il3945_ops = {
.txq_attach_buf_to_tfd = il3945_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = il3945_hw_txq_free_tfd,
.txq_init = il3945_hw_tx_queue_init,
.load_ucode = il3945_load_bsm,
.dump_nic_error_log = il3945_dump_nic_error_log,
- .apm_ops = {
- .init = il3945_apm_init,
- .config = il3945_nic_config,
- },
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REGULATORY_BAND_1_CHANNELS,
- EEPROM_REGULATORY_BAND_2_CHANNELS,
- EEPROM_REGULATORY_BAND_3_CHANNELS,
- EEPROM_REGULATORY_BAND_4_CHANNELS,
- EEPROM_REGULATORY_BAND_5_CHANNELS,
- EEPROM_REGULATORY_BAND_NO_HT40,
- EEPROM_REGULATORY_BAND_NO_HT40,
- },
- .acquire_semaphore = il3945_eeprom_acquire_semaphore,
- .release_semaphore = il3945_eeprom_release_semaphore,
- },
+ .apm_init = il3945_apm_init,
.send_tx_power = il3945_send_tx_power,
.is_valid_rtc_data_addr = il3945_hw_valid_rtc_data_addr,
+ .eeprom_acquire_semaphore = il3945_eeprom_acquire_semaphore,
+ .eeprom_release_semaphore = il3945_eeprom_release_semaphore,
-#ifdef CONFIG_IWLEGACY_DEBUGFS
- .debugfs_ops = {
- .rx_stats_read = il3945_ucode_rx_stats_read,
- .tx_stats_read = il3945_ucode_tx_stats_read,
- .general_stats_read = il3945_ucode_general_stats_read,
- },
-#endif
-};
-
-static const struct il_legacy_ops il3945_legacy_ops = {
- .post_associate = il3945_post_associate,
- .config_ap = il3945_config_ap,
- .manage_ibss_station = il3945_manage_ibss_station,
-};
+ .rxon_assoc = il3945_send_rxon_assoc,
+ .commit_rxon = il3945_commit_rxon,
-static struct il_hcmd_utils_ops il3945_hcmd_utils = {
.get_hcmd_size = il3945_get_hcmd_size,
.build_addsta_hcmd = il3945_build_addsta_hcmd,
.request_scan = il3945_request_scan,
.post_scan = il3945_post_scan,
-};
-static const struct il_ops il3945_ops = {
- .lib = &il3945_lib,
- .hcmd = &il3945_hcmd,
- .utils = &il3945_hcmd_utils,
- .led = &il3945_led_ops,
- .legacy = &il3945_legacy_ops,
- .ieee80211_ops = &il3945_hw_ops,
-};
+ .post_associate = il3945_post_associate,
+ .config_ap = il3945_config_ap,
+ .manage_ibss_station = il3945_manage_ibss_station,
-static struct il_base_params il3945_base_params = {
- .eeprom_size = IL3945_EEPROM_IMG_SIZE,
- .num_of_queues = IL39_NUM_QUEUES,
- .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
- .set_l0s = false,
- .use_bsm = true,
- .led_compensation = 64,
- .wd_timeout = IL_DEF_WD_TIMEOUT,
+ .send_led_cmd = il3945_send_led_cmd,
};
static struct il_cfg il3945_bg_cfg = {
@@ -2712,10 +2654,26 @@ static struct il_cfg il3945_bg_cfg = {
.ucode_api_min = IL3945_UCODE_API_MIN,
.sku = IL_SKU_G,
.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
- .ops = &il3945_ops,
.mod_params = &il3945_mod_params,
- .base_params = &il3945_base_params,
.led_mode = IL_LED_BLINK,
+
+ .eeprom_size = IL3945_EEPROM_IMG_SIZE,
+ .num_of_queues = IL39_NUM_QUEUES,
+ .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
+ .set_l0s = false,
+ .use_bsm = true,
+ .led_compensation = 64,
+ .wd_timeout = IL_DEF_WD_TIMEOUT,
+
+ .regulatory_bands = {
+ EEPROM_REGULATORY_BAND_1_CHANNELS,
+ EEPROM_REGULATORY_BAND_2_CHANNELS,
+ EEPROM_REGULATORY_BAND_3_CHANNELS,
+ EEPROM_REGULATORY_BAND_4_CHANNELS,
+ EEPROM_REGULATORY_BAND_5_CHANNELS,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ },
};
static struct il_cfg il3945_abg_cfg = {
@@ -2725,10 +2683,26 @@ static struct il_cfg il3945_abg_cfg = {
.ucode_api_min = IL3945_UCODE_API_MIN,
.sku = IL_SKU_A | IL_SKU_G,
.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
- .ops = &il3945_ops,
.mod_params = &il3945_mod_params,
- .base_params = &il3945_base_params,
.led_mode = IL_LED_BLINK,
+
+ .eeprom_size = IL3945_EEPROM_IMG_SIZE,
+ .num_of_queues = IL39_NUM_QUEUES,
+ .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
+ .set_l0s = false,
+ .use_bsm = true,
+ .led_compensation = 64,
+ .wd_timeout = IL_DEF_WD_TIMEOUT,
+
+ .regulatory_bands = {
+ EEPROM_REGULATORY_BAND_1_CHANNELS,
+ EEPROM_REGULATORY_BAND_2_CHANNELS,
+ EEPROM_REGULATORY_BAND_3_CHANNELS,
+ EEPROM_REGULATORY_BAND_4_CHANNELS,
+ EEPROM_REGULATORY_BAND_5_CHANNELS,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ },
};
DEFINE_PCI_DEVICE_TABLE(il3945_hw_card_ids) = {
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
index 9f42f79f8778..1d45075e0d5b 100644
--- a/drivers/net/wireless/iwlegacy/3945.h
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -36,6 +36,8 @@ extern const struct pci_device_id il3945_hw_card_ids[];
#include "common.h"
+extern const struct il_ops il3945_ops;
+
/* Highest firmware API version supported */
#define IL3945_UCODE_API_MAX 2
@@ -249,7 +251,7 @@ extern int il4965_get_temperature(const struct il_priv *il);
extern void il3945_post_associate(struct il_priv *il);
extern void il3945_config_ap(struct il_priv *il);
-extern int il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx);
+extern int il3945_commit_rxon(struct il_priv *il);
/**
* il3945_hw_find_station - Find station id for a given BSSID
@@ -261,8 +263,6 @@ extern int il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx);
*/
extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
-extern struct ieee80211_ops il3945_hw_ops;
-
extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
extern int il3945_init_hw_rate_table(struct il_priv *il);
extern void il3945_reg_txpower_periodic(struct il_priv *il);
@@ -595,13 +595,7 @@ struct il3945_tfd {
} __packed;
#ifdef CONFIG_IWLEGACY_DEBUGFS
-ssize_t il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t il3945_ucode_general_stats_read(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos);
+extern const struct il_debugfs_ops il3945_debugfs_ops;
#endif
#endif
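
3945.h now exports one flat il3945_ops table in place of the nested hcmd/lib/utils/led/legacy structures, and optional hooks are simply left NULL and checked before the call. A compact sketch of that flat ops-table shape, with made-up names rather than the driver's real callbacks:

    #include <stdio.h>

    /* Simplified stand-in for a flattened per-device ops table. */
    struct dev_ops {
            int  (*send_tx_power)(void *dev);
            void (*update_chain_flags)(void *dev);  /* optional: may be NULL */
    };

    static int family_a_send_tx_power(void *dev)
    {
            (void)dev;
            printf("tx power sent\n");
            return 0;
    }

    /* One flat, const table per hardware family instead of nested sub-tables. */
    static const struct dev_ops family_a_ops = {
            .send_tx_power = family_a_send_tx_power,
            /* .update_chain_flags left NULL: this family has no such hook */
    };

    struct device_priv {
            const struct dev_ops *ops;
    };

    int main(void)
    {
            struct device_priv il = { .ops = &family_a_ops };

            il.ops->send_tx_power(&il);
            if (il.ops->update_chain_flags)         /* optional hook */
                    il.ops->update_chain_flags(&il);
            return 0;
    }

The same pattern shows up in the hunks as "if (il->ops->update_chain_flags) il->ops->update_chain_flags(il);".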
diff --git a/drivers/net/wireless/iwlegacy/4965-calib.c b/drivers/net/wireless/iwlegacy/4965-calib.c
index d3248e3ef23b..e78bdefb8952 100644
--- a/drivers/net/wireless/iwlegacy/4965-calib.c
+++ b/drivers/net/wireless/iwlegacy/4965-calib.c
@@ -79,18 +79,6 @@ struct stats_general_data {
u32 beacon_energy_c;
};
-void
-il4965_calib_free_results(struct il_priv *il)
-{
- int i;
-
- for (i = 0; i < IL_CALIB_MAX; i++) {
- kfree(il->calib_results[i].buf);
- il->calib_results[i].buf = NULL;
- il->calib_results[i].buf_len = 0;
- }
-}
-
/*****************************************************************************
* RUNTIME calibrations framework
*****************************************************************************/
@@ -627,13 +615,13 @@ il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig,
average_sig[0] =
data->chain_signal_a /
- il->cfg->base_params->chain_noise_num_beacons;
+ il->cfg->chain_noise_num_beacons;
average_sig[1] =
data->chain_signal_b /
- il->cfg->base_params->chain_noise_num_beacons;
+ il->cfg->chain_noise_num_beacons;
average_sig[2] =
data->chain_signal_c /
- il->cfg->base_params->chain_noise_num_beacons;
+ il->cfg->chain_noise_num_beacons;
if (average_sig[0] >= average_sig[1]) {
max_average_sig = average_sig[0];
@@ -806,8 +794,6 @@ il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
unsigned long flags;
struct stats_rx_non_phy *rx_info;
- struct il_rxon_context *ctx = &il->ctx;
-
if (il->disable_chain_noise_cal)
return;
@@ -833,8 +819,8 @@ il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
return;
}
- rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
- rxon_chnum = le16_to_cpu(ctx->staging.channel);
+ rxon_band24 = !!(il->staging.flags & RXON_FLG_BAND_24G_MSK);
+ rxon_chnum = le16_to_cpu(il->staging.channel);
stat_band24 =
!!(((struct il_notif_stats *)stat_resp)->
@@ -888,7 +874,7 @@ il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
/* If this is the "chain_noise_num_beacons", determine:
* 1) Disconnected antennas (using signal strengths)
* 2) Differential gain (using silence noise) to balance receivers */
- if (data->beacon_count != il->cfg->base_params->chain_noise_num_beacons)
+ if (data->beacon_count != il->cfg->chain_noise_num_beacons)
return;
/* Analyze signal for disconnected antenna */
@@ -896,11 +882,11 @@ il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
/* Analyze noise for rx balance */
average_noise[0] =
- data->chain_noise_a / il->cfg->base_params->chain_noise_num_beacons;
+ data->chain_noise_a / il->cfg->chain_noise_num_beacons;
average_noise[1] =
- data->chain_noise_b / il->cfg->base_params->chain_noise_num_beacons;
+ data->chain_noise_b / il->cfg->chain_noise_num_beacons;
average_noise[2] =
- data->chain_noise_c / il->cfg->base_params->chain_noise_num_beacons;
+ data->chain_noise_c / il->cfg->chain_noise_num_beacons;
for (i = 0; i < NUM_RX_CHAINS; i++) {
if (!data->disconn_array[i] &&
@@ -925,8 +911,8 @@ il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
/* Some power changes may have been made during the calibration.
* Update and commit the RXON
*/
- if (il->cfg->ops->lib->update_chain_flags)
- il->cfg->ops->lib->update_chain_flags(il);
+ if (il->ops->update_chain_flags)
+ il->ops->update_chain_flags(il);
data->state = IL_CHAIN_NOISE_DONE;
il_power_update_mode(il, false);
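
The chain-noise calibration in 4965-calib.c accumulates per-chain signal over cfg->chain_noise_num_beacons beacons, then divides to get averages before judging antennas. A self-contained sketch of that accumulate-then-average step; the constants and the "disconnected" threshold here are illustrative, not the driver's:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_RX_CHAINS           3
    #define NOISE_NUM_BEACONS       16      /* stand-in for chain_noise_num_beacons */

    struct chain_noise_data {
            unsigned int beacon_count;
            uint32_t chain_signal[NUM_RX_CHAINS];   /* running sums over beacons */
    };

    /* Accumulate one beacon's per-chain signal; evaluate after N beacons. */
    static void chain_noise_sample(struct chain_noise_data *d,
                                   const uint32_t sig[NUM_RX_CHAINS])
    {
            uint32_t avg[NUM_RX_CHAINS], max_avg = 0;
            int i;

            if (d->beacon_count >= NOISE_NUM_BEACONS)
                    return;

            for (i = 0; i < NUM_RX_CHAINS; i++)
                    d->chain_signal[i] += sig[i];

            if (++d->beacon_count != NOISE_NUM_BEACONS)
                    return;

            for (i = 0; i < NUM_RX_CHAINS; i++) {
                    avg[i] = d->chain_signal[i] / NOISE_NUM_BEACONS;
                    if (avg[i] > max_avg)
                            max_avg = avg[i];
            }

            /* A chain far below the strongest one is likely not connected. */
            for (i = 0; i < NUM_RX_CHAINS; i++)
                    printf("chain %d: avg %u%s\n", i, avg[i],
                           avg[i] * 2 < max_avg ? " (possibly disconnected)" : "");
    }

    int main(void)
    {
            struct chain_noise_data data = { 0 };
            const uint32_t sample[NUM_RX_CHAINS] = { 200, 190, 40 };
            int n;

            for (n = 0; n < NOISE_NUM_BEACONS; n++)
                    chain_noise_sample(&data, sample);
            return 0;
    }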
diff --git a/drivers/net/wireless/iwlegacy/4965-debug.c b/drivers/net/wireless/iwlegacy/4965-debug.c
index 98ec39f56ba3..c8153fc64f74 100644
--- a/drivers/net/wireless/iwlegacy/4965-debug.c
+++ b/drivers/net/wireless/iwlegacy/4965-debug.c
@@ -744,3 +744,9 @@ il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
kfree(buf);
return ret;
}
+
+const struct il_debugfs_ops il4965_debugfs_ops = {
+ .rx_stats_read = il4965_ucode_rx_stats_read,
+ .tx_stats_read = il4965_ucode_tx_stats_read,
+ .general_stats_read = il4965_ucode_general_stats_read,
+};
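
The 4965-mac.c changes that follow store bare skb pointers in txq->skbs[] (the il_tx_info wrapper is gone) and reclaim them as the read pointer chases the index reported by the firmware. A stand-alone sketch of that circular reclaim loop, with simplified types and no real networking calls:

    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-ins for the Tx queue and its frame slots. */
    #define QUEUE_SIZE      8

    struct tx_queue {
            void *skbs[QUEUE_SIZE];         /* frame pointers owned by the queue */
            unsigned int read_ptr;
            unsigned int write_ptr;
    };

    static unsigned int queue_inc_wrap(unsigned int idx)
    {
            return (idx + 1) % QUEUE_SIZE;
    }

    /* Release every frame up to and including 'idx', as reported by firmware. */
    static int tx_queue_reclaim(struct tx_queue *txq, unsigned int idx)
    {
            int freed = 0;

            for (idx = queue_inc_wrap(idx); txq->read_ptr != idx;
                 txq->read_ptr = queue_inc_wrap(txq->read_ptr)) {
                    void *skb = txq->skbs[txq->read_ptr];

                    if (!skb)
                            continue;
                    /* ... report Tx status for 'skb' to the stack here ... */
                    txq->skbs[txq->read_ptr] = NULL;
                    freed++;
            }
            return freed;
    }

    int main(void)
    {
            struct tx_queue txq;
            static int frames[4];
            unsigned int i;

            memset(&txq, 0, sizeof(txq));
            for (i = 0; i < 4; i++) {
                    txq.skbs[txq.write_ptr] = &frames[i];
                    txq.write_ptr = queue_inc_wrap(txq.write_ptr);
            }
            printf("freed %d entries\n", tx_queue_reclaim(&txq, 3));
            return 0;
    }

Clearing each slot as it is handed back is what allows the WARN_ON_ONCE(skb == NULL) checks in the reclaim paths below to catch double completions.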
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 1667232af647..7b54dbb338be 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -199,18 +199,14 @@ il4965_hw_nic_init(struct il_priv *il)
struct il_rx_queue *rxq = &il->rxq;
int ret;
- /* nic_init */
spin_lock_irqsave(&il->lock, flags);
- il->cfg->ops->lib->apm_ops.init(il);
-
+ il_apm_init(il);
/* Set interrupt coalescing calibration timer to default (512 usecs) */
il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
-
spin_unlock_irqrestore(&il->lock, flags);
il4965_set_pwr_vmain(il);
-
- il->cfg->ops->lib->apm_ops.config(il);
+ il4965_nic_config(il);
/* Allocate the RX queue, or reset if it is already allocated */
if (!rxq->bd) {
@@ -445,11 +441,15 @@ il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
int
il4965_rxq_stop(struct il_priv *il)
{
+ int ret;
- /* stop Rx DMA */
- il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
- FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+ _il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ ret = _il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
+ FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
+ FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
+ 1000);
+ if (ret < 0)
+ IL_ERR("Can't stop Rx DMA.\n");
return 0;
}
@@ -692,7 +692,6 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
/* Find max signal strength (dBm) among 3 antenna/receiver chains */
rx_status.signal = il4965_calc_rssi(il, phy_res);
- il_dbg_log_rx_data_frame(il, len, header);
D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
(unsigned long long)rx_status.mactime);
@@ -843,7 +842,6 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
.flags = CMD_SIZE_HUGE,
};
struct il_scan_cmd *scan;
- struct il_rxon_context *ctx = &il->ctx;
u32 rate_flags = 0;
u16 cmd_len;
u16 rx_chain = 0;
@@ -859,8 +857,6 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
lockdep_assert_held(&il->mutex);
- ctx = il_rxon_ctx_from_vif(vif);
-
if (!il->scan_cmd) {
il->scan_cmd =
kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
@@ -919,15 +915,14 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
D_SCAN("Start passive scan.\n");
scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
- scan->tx_cmd.sta_id = ctx->bcast_sta_id;
+ scan->tx_cmd.sta_id = il->hw_params.bcast_id;
scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
switch (il->scan_band) {
case IEEE80211_BAND_2GHZ:
scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
chan_mod =
- le32_to_cpu(il->ctx.active.
- flags & RXON_FLG_CHANNEL_MODE_MSK) >>
+ le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
RXON_FLG_CHANNEL_MODE_POS;
if (chan_mod == CHANNEL_MODE_PURE_40) {
rate = RATE_6M_PLCP;
@@ -1034,8 +1029,7 @@ il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
if (add)
- return il4965_add_bssid_station(il, vif_priv->ctx,
- vif->bss_conf.bssid,
+ return il4965_add_bssid_station(il, vif->bss_conf.bssid,
&vif_priv->ibss_bssid_sta_id);
return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
vif->bss_conf.bssid);
@@ -1128,7 +1122,7 @@ il4965_count_chain_bitmap(u32 chain_bitmap)
* This should not be used for scan command ... it puts data in wrong place.
*/
void
-il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx)
+il4965_set_rxon_chain(struct il_priv *il)
{
bool is_single = il4965_is_single_rx_stream(il);
bool is_cam = !test_bit(S_POWER_PMI, &il->status);
@@ -1164,14 +1158,14 @@ il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx)
rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
- ctx->staging.rx_chain = cpu_to_le16(rx_chain);
+ il->staging.rx_chain = cpu_to_le16(rx_chain);
if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
- ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
+ il->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
else
- ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
+ il->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
- D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", ctx->staging.rx_chain,
+ D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", il->staging.rx_chain,
active_rx_cnt, idle_rx_cnt);
WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
@@ -1348,12 +1342,11 @@ il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
}
#endif
-#define REG_RECALIB_PERIOD (60)
-
void
il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
- int change;
+ const int recalib_seconds = 60;
+ bool change;
struct il_rx_pkt *pkt = rxb_addr(rxb);
D_RX("Statistics notification received (%d vs %d).\n",
@@ -1374,20 +1367,21 @@ il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
set_bit(S_STATS, &il->status);
- /* Reschedule the stats timer to occur in
- * REG_RECALIB_PERIOD seconds to ensure we get a
- * thermal update even if the uCode doesn't give
- * us one */
+ /*
+ * Reschedule the stats timer to occur in recalib_seconds to ensure
+ * we get a thermal update even if the uCode doesn't give us one
+ */
mod_timer(&il->stats_periodic,
- jiffies + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
+ jiffies + msecs_to_jiffies(recalib_seconds * 1000));
if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
(pkt->hdr.cmd == N_STATS)) {
il4965_rx_calc_noise(il);
queue_work(il->workqueue, &il->run_time_calib_work);
}
- if (il->cfg->ops->lib->temp_ops.temperature && change)
- il->cfg->ops->lib->temp_ops.temperature(il);
+
+ if (change)
+ il4965_temperature_calib(il);
}
void
@@ -1457,10 +1451,17 @@ il4965_get_ac_from_tid(u16 tid)
}
static inline int
-il4965_get_fifo_from_tid(struct il_rxon_context *ctx, u16 tid)
+il4965_get_fifo_from_tid(u16 tid)
{
+ const u8 ac_to_fifo[] = {
+ IL_TX_FIFO_VO,
+ IL_TX_FIFO_VI,
+ IL_TX_FIFO_BE,
+ IL_TX_FIFO_BK,
+ };
+
if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return ctx->ac_to_fifo[tid_to_ac[tid]];
+ return ac_to_fifo[tid_to_ac[tid]];
/* no support for TIDs 8-15 yet */
return -EINVAL;
@@ -1639,7 +1640,6 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
struct il_device_cmd *out_cmd;
struct il_cmd_meta *out_meta;
struct il_tx_cmd *tx_cmd;
- struct il_rxon_context *ctx = &il->ctx;
int txq_id;
dma_addr_t phys_addr;
dma_addr_t txcmd_phys;
@@ -1655,9 +1655,6 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
unsigned long flags;
bool is_agg = false;
- if (info->control.vif)
- ctx = il_rxon_ctx_from_vif(info->control.vif);
-
spin_lock_irqsave(&il->lock, flags);
if (il_is_rfkill(il)) {
D_DROP("Dropping - RF KILL\n");
@@ -1679,10 +1676,10 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
/* For management frames use broadcast id to do not break aggregation */
if (!ieee80211_is_data(fc))
- sta_id = ctx->bcast_sta_id;
+ sta_id = il->hw_params.bcast_id;
else {
/* Find idx into station table for destination station */
- sta_id = il_sta_id_or_broadcast(il, ctx, info->control.sta);
+ sta_id = il_sta_id_or_broadcast(il, info->control.sta);
if (sta_id == IL_INVALID_STATION) {
D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
@@ -1696,7 +1693,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
sta_priv = (void *)sta->drv_priv;
if (sta_priv && sta_priv->asleep &&
- (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
+ (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
/*
* This sends an asynchronous command to the device,
* but we can rely on it being processed before the
@@ -1709,19 +1706,11 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
}
- /*
- * Send this frame after DTIM -- there's a special queue
- * reserved for this for contexts that support AP mode.
- */
- if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
- txq_id = ctx->mcast_queue;
- /*
- * The microcode will clear the more data
- * bit in the last frame it transmits.
- */
- hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- } else
- txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+ /* FIXME: remove me ? */
+ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
+
+ /* Access category (AC) is also the queue number */
+ txq_id = skb_get_queue_mapping(skb);
/* irqs already disabled/saved above when locking il->lock */
spin_lock(&il->sta_lock);
@@ -1763,10 +1752,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
spin_unlock(&il->sta_lock);
- /* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
- txq->txb[q->write_ptr].skb = skb;
- txq->txb[q->write_ptr].ctx = ctx;
+ txq->skbs[q->write_ptr] = skb;
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_cmd = txq->cmd[q->write_ptr];
@@ -1798,7 +1784,6 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
/* TODO need this for burst mode later on */
il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
- il_dbg_log_tx_data_frame(il, len, hdr);
il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
@@ -1828,8 +1813,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
dma_unmap_len_set(out_meta, len, firstlen);
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
- il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen,
- 1, 0);
+ il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
@@ -1845,8 +1829,8 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
phys_addr =
pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
PCI_DMA_TODEVICE);
- il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
- secondlen, 0, 0);
+ il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen,
+ 0, 0);
}
scratch_phys =
@@ -1866,9 +1850,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
/* Set up entry for this TFD in Tx byte-count array */
if (info->flags & IEEE80211_TX_CTL_AMPDU)
- il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq,
- le16_to_cpu(tx_cmd->
- len));
+ il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len));
pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
PCI_DMA_BIDIRECTIONAL);
@@ -1957,7 +1939,7 @@ il4965_hw_txq_ctx_free(struct il_priv *il)
il4965_free_dma_ptr(il, &il->scd_bc_tbls);
/* free tx queue structure */
- il_txq_mem(il);
+ il_free_txq_mem(il);
}
/**
@@ -1970,8 +1952,7 @@ il4965_hw_txq_ctx_free(struct il_priv *il)
int
il4965_txq_ctx_alloc(struct il_priv *il)
{
- int ret;
- int txq_id, slots_num;
+ int ret, txq_id;
unsigned long flags;
/* Free all tx/cmd queues and keep-warm buffer */
@@ -2008,10 +1989,7 @@ il4965_txq_ctx_alloc(struct il_priv *il)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
- slots_num =
- (txq_id ==
- il->cmd_queue) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
+ ret = il_tx_queue_init(il, txq_id);
if (ret) {
IL_ERR("Tx %d queue init failed\n", txq_id);
goto error;
@@ -2032,52 +2010,27 @@ error_bc_tbls:
void
il4965_txq_ctx_reset(struct il_priv *il)
{
- int txq_id, slots_num;
+ int txq_id;
unsigned long flags;
spin_lock_irqsave(&il->lock, flags);
/* Turn off all Tx DMA fifos */
il4965_txq_set_sched(il, 0);
-
/* Tell NIC where to find the "keep warm" buffer */
il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
spin_unlock_irqrestore(&il->lock, flags);
/* Alloc and init all Tx queues, including the command queue (#4) */
- for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
- slots_num =
- txq_id == il->cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- il_tx_queue_reset(il, &il->txq[txq_id], slots_num, txq_id);
- }
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ il_tx_queue_reset(il, txq_id);
}
-/**
- * il4965_txq_ctx_stop - Stop all Tx DMA channels
- */
void
-il4965_txq_ctx_stop(struct il_priv *il)
+il4965_txq_ctx_unmap(struct il_priv *il)
{
- int ch, txq_id;
- unsigned long flags;
-
- /* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&il->lock, flags);
-
- il4965_txq_set_sched(il, 0);
-
- /* Stop each Tx DMA channel, and wait for it to be idle */
- for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
- il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- if (il_poll_bit
- (il, FH49_TSSR_TX_STATUS_REG,
- FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000))
- IL_ERR("Failing on timeout while stopping"
- " DMA channel %d [0x%08x]", ch,
- il_rd(il, FH49_TSSR_TX_STATUS_REG));
- }
- spin_unlock_irqrestore(&il->lock, flags);
+ int txq_id;
if (!il->txq)
return;
@@ -2090,6 +2043,30 @@ il4965_txq_ctx_stop(struct il_priv *il)
il_tx_queue_unmap(il, txq_id);
}
+/**
+ * il4965_txq_ctx_stop - Stop all Tx DMA channels
+ */
+void
+il4965_txq_ctx_stop(struct il_priv *il)
+{
+ int ch, ret;
+
+ _il_wr_prph(il, IL49_SCD_TXFACT, 0);
+
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
+ _il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ ret =
+ _il_poll_bit(il, FH49_TSSR_TX_STATUS_REG,
+ FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+ FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+ 1000);
+ if (ret < 0)
+ IL_ERR("Timeout stopping DMA channel %d [0x%08x]",
+ ch, _il_rd(il, FH49_TSSR_TX_STATUS_REG));
+ }
+}
+
/*
* Find first available (lowest unused) Tx Queue, mark it "active".
* Called only when finding queue for aggregation.
@@ -2163,11 +2140,11 @@ il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
(IL49_FIRST_AMPDU_QUEUE +
- il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+ il->cfg->num_of_ampdu_queues <= txq_id)) {
IL_WARN("queue number out of range: %d, must be %d to %d\n",
txq_id, IL49_FIRST_AMPDU_QUEUE,
IL49_FIRST_AMPDU_QUEUE +
- il->cfg->base_params->num_of_ampdu_queues - 1);
+ il->cfg->num_of_ampdu_queues - 1);
return -EINVAL;
}
@@ -2230,7 +2207,8 @@ il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
unsigned long flags;
struct il_tid_data *tid_data;
- tx_fifo = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
+ /* FIXME: warning if tx fifo not found ? */
+ tx_fifo = il4965_get_fifo_from_tid(tid);
if (unlikely(tx_fifo < 0))
return tx_fifo;
@@ -2290,11 +2268,11 @@ il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
{
if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
(IL49_FIRST_AMPDU_QUEUE +
- il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+ il->cfg->num_of_ampdu_queues <= txq_id)) {
IL_WARN("queue number out of range: %d, must be %d to %d\n",
txq_id, IL49_FIRST_AMPDU_QUEUE,
IL49_FIRST_AMPDU_QUEUE +
- il->cfg->base_params->num_of_ampdu_queues - 1);
+ il->cfg->num_of_ampdu_queues - 1);
return -EINVAL;
}
@@ -2323,7 +2301,8 @@ il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
int write_ptr, read_ptr;
unsigned long flags;
- tx_fifo_id = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
+ /* FIXME: warning if tx_fifo_id not found ? */
+ tx_fifo_id = il4965_get_fifo_from_tid(tid);
if (unlikely(tx_fifo_id < 0))
return tx_fifo_id;
@@ -2397,9 +2376,6 @@ il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
struct il_queue *q = &il->txq[txq_id].q;
u8 *addr = il->stations[sta_id].sta.sta.addr;
struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];
- struct il_rxon_context *ctx;
-
- ctx = &il->ctx;
lockdep_assert_held(&il->sta_lock);
@@ -2410,11 +2386,11 @@ il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
if (txq_id == tid_data->agg.txq_id &&
q->read_ptr == q->write_ptr) {
u16 ssn = SEQ_TO_SN(tid_data->seq_number);
- int tx_fifo = il4965_get_fifo_from_tid(ctx, tid);
+ int tx_fifo = il4965_get_fifo_from_tid(tid);
D_HT("HW queue empty: continue DELBA flow\n");
il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
tid_data->agg.state = IL_AGG_OFF;
- ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+ ieee80211_stop_tx_ba_cb_irqsafe(il->vif, addr, tid);
}
break;
case IL_EMPTYING_HW_QUEUE_ADDBA:
@@ -2422,7 +2398,7 @@ il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
if (tid_data->tfds_in_queue == 0) {
D_HT("HW queue empty: continue ADDBA flow\n");
tid_data->agg.state = IL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+ ieee80211_start_tx_ba_cb_irqsafe(il->vif, addr, tid);
}
break;
}
@@ -2431,14 +2407,13 @@ il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
}
static void
-il4965_non_agg_tx_status(struct il_priv *il, struct il_rxon_context *ctx,
- const u8 *addr1)
+il4965_non_agg_tx_status(struct il_priv *il, const u8 *addr1)
{
struct ieee80211_sta *sta;
struct il_station_priv *sta_priv;
rcu_read_lock();
- sta = ieee80211_find_sta(ctx->vif, addr1);
+ sta = ieee80211_find_sta(il->vif, addr1);
if (sta) {
sta_priv = (void *)sta->drv_priv;
/* avoid atomic ops if this isn't a client */
@@ -2450,14 +2425,14 @@ il4965_non_agg_tx_status(struct il_priv *il, struct il_rxon_context *ctx,
}
static void
-il4965_tx_status(struct il_priv *il, struct il_tx_info *tx_info, bool is_agg)
+il4965_tx_status(struct il_priv *il, struct sk_buff *skb, bool is_agg)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (!is_agg)
- il4965_non_agg_tx_status(il, tx_info->ctx, hdr->addr1);
+ il4965_non_agg_tx_status(il, hdr->addr1);
- ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
+ ieee80211_tx_status_irqsafe(il->hw, skb);
}
int
@@ -2465,9 +2440,9 @@ il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
{
struct il_tx_queue *txq = &il->txq[txq_id];
struct il_queue *q = &txq->q;
- struct il_tx_info *tx_info;
int nfreed = 0;
struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
@@ -2479,20 +2454,19 @@ il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
- tx_info = &txq->txb[txq->q.read_ptr];
+ skb = txq->skbs[txq->q.read_ptr];
- if (WARN_ON_ONCE(tx_info->skb == NULL))
+ if (WARN_ON_ONCE(skb == NULL))
continue;
- hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ hdr = (struct ieee80211_hdr *) skb->data;
if (ieee80211_is_data_qos(hdr->frame_control))
nfreed++;
- il4965_tx_status(il, tx_info,
- txq_id >= IL4965_FIRST_AMPDU_QUEUE);
- tx_info->skb = NULL;
+ il4965_tx_status(il, skb, txq_id >= IL4965_FIRST_AMPDU_QUEUE);
- il->cfg->ops->lib->txq_free_tfd(il, txq);
+ txq->skbs[txq->q.read_ptr] = NULL;
+ il->ops->txq_free_tfd(il, txq);
}
return nfreed;
}
@@ -2555,7 +2529,7 @@ il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
- info = IEEE80211_SKB_CB(il->txq[scd_flow].txb[agg->start_idx].skb);
+ info = IEEE80211_SKB_CB(il->txq[scd_flow].skbs[agg->start_idx]);
memset(&info->status, 0, sizeof(info->status));
info->flags |= IEEE80211_TX_STAT_ACK;
info->flags |= IEEE80211_TX_STAT_AMPDU;
@@ -2566,6 +2540,308 @@ il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
return 0;
}
+static inline bool
+il4965_is_tx_success(u32 status)
+{
+ status &= TX_STATUS_MSK;
+ return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
+}
+
+static u8
+il4965_find_station(struct il_priv *il, const u8 *addr)
+{
+ int i;
+ int start = 0;
+ int ret = IL_INVALID_STATION;
+ unsigned long flags;
+
+ if (il->iw_mode == NL80211_IFTYPE_ADHOC)
+ start = IL_STA_ID;
+
+ if (is_broadcast_ether_addr(addr))
+ return il->hw_params.bcast_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ for (i = start; i < il->hw_params.max_stations; i++)
+ if (il->stations[i].used &&
+ (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) {
+ ret = i;
+ goto out;
+ }
+
+ D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);
+
+out:
+ /*
+ * It may be possible that more commands interacting with stations
+ * arrive before we completed processing the adding of
+ * station
+ */
+ if (ret != IL_INVALID_STATION &&
+ (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
+ ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
+ (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
+ IL_ERR("Requested station info for sta %d before ready.\n",
+ ret);
+ ret = IL_INVALID_STATION;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return ret;
+}
+
+static int
+il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
+{
+ if (il->iw_mode == NL80211_IFTYPE_STATION)
+ return IL_AP_ID;
+ else {
+ u8 *da = ieee80211_get_DA(hdr);
+
+ return il4965_find_station(il, da);
+ }
+}
+
+static inline u32
+il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
+{
+ return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
+}
+
+static inline u32
+il4965_tx_status_to_mac80211(u32 status)
+{
+ status &= TX_STATUS_MSK;
+
+ switch (status) {
+ case TX_STATUS_SUCCESS:
+ case TX_STATUS_DIRECT_DONE:
+ return IEEE80211_TX_STAT_ACK;
+ case TX_STATUS_FAIL_DEST_PS:
+ return IEEE80211_TX_STAT_TX_FILTERED;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
+ */
+static int
+il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
+ struct il4965_tx_resp *tx_resp, int txq_id,
+ u16 start_idx)
+{
+ u16 status;
+ struct agg_tx_status *frame_status = tx_resp->u.agg_status;
+ struct ieee80211_tx_info *info = NULL;
+ struct ieee80211_hdr *hdr = NULL;
+ u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
+ int i, sh, idx;
+ u16 seq;
+ if (agg->wait_for_ba)
+ D_TX_REPLY("got tx response w/o block-ack\n");
+
+ agg->frame_count = tx_resp->frame_count;
+ agg->start_idx = start_idx;
+ agg->rate_n_flags = rate_n_flags;
+ agg->bitmap = 0;
+
+ /* num frames attempted by Tx command */
+ if (agg->frame_count == 1) {
+ /* Only one frame was attempted; no block-ack will arrive */
+ status = le16_to_cpu(frame_status[0].status);
+ idx = start_idx;
+
+ D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
+ agg->frame_count, agg->start_idx, idx);
+
+ info = IEEE80211_SKB_CB(il->txq[txq_id].skbs[idx]);
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+ info->flags |= il4965_tx_status_to_mac80211(status);
+ il4965_hwrate_to_tx_control(il, rate_n_flags, info);
+
+ D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
+ tx_resp->failure_frame);
+ D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
+
+ agg->wait_for_ba = 0;
+ } else {
+ /* Two or more frames were attempted; expect block-ack */
+ u64 bitmap = 0;
+ int start = agg->start_idx;
+ struct sk_buff *skb;
+
+ /* Construct bit-map of pending frames within Tx win */
+ for (i = 0; i < agg->frame_count; i++) {
+ u16 sc;
+ status = le16_to_cpu(frame_status[i].status);
+ seq = le16_to_cpu(frame_status[i].sequence);
+ idx = SEQ_TO_IDX(seq);
+ txq_id = SEQ_TO_QUEUE(seq);
+
+ if (status &
+ (AGG_TX_STATE_FEW_BYTES_MSK |
+ AGG_TX_STATE_ABORT_MSK))
+ continue;
+
+ D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
+ agg->frame_count, txq_id, idx);
+
+ skb = il->txq[txq_id].skbs[idx];
+ if (WARN_ON_ONCE(skb == NULL))
+ return -1;
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ sc = le16_to_cpu(hdr->seq_ctrl);
+ if (idx != (SEQ_TO_SN(sc) & 0xff)) {
+ IL_ERR("BUG_ON idx doesn't match seq control"
+ " idx=%d, seq_idx=%d, seq=%d\n", idx,
+ SEQ_TO_SN(sc), hdr->seq_ctrl);
+ return -1;
+ }
+
+ D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
+ SEQ_TO_SN(sc));
+
+ sh = idx - start;
+ if (sh > 64) {
+ sh = (start - idx) + 0xff;
+ bitmap = bitmap << sh;
+ sh = 0;
+ start = idx;
+ } else if (sh < -64)
+ sh = 0xff - (start - idx);
+ else if (sh < 0) {
+ sh = start - idx;
+ start = idx;
+ bitmap = bitmap << sh;
+ sh = 0;
+ }
+ bitmap |= 1ULL << sh;
+ D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
+ (unsigned long long)bitmap);
+ }
+
+ agg->bitmap = bitmap;
+ agg->start_idx = start;
+ D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
+ agg->frame_count, agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ if (bitmap)
+ agg->wait_for_ba = 1;
+ }
+ return 0;
+}
+
+/**
+ * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
+ */
+static void
+il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int idx = SEQ_TO_IDX(sequence);
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *info;
+ struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ u32 status = le32_to_cpu(tx_resp->u.status);
+ int uninitialized_var(tid);
+ int sta_id;
+ int freed;
+ u8 *qc = NULL;
+ unsigned long flags;
+
+ if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
+ "is out of range [0-%d] %d %d\n", txq_id, idx,
+ txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
+ return;
+ }
+
+ txq->time_stamp = jiffies;
+
+ skb = txq->skbs[txq->q.read_ptr];
+ info = IEEE80211_SKB_CB(skb);
+ memset(&info->status, 0, sizeof(info->status));
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ }
+
+ sta_id = il4965_get_ra_sta_id(il, hdr);
+ if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
+ IL_ERR("Station not known\n");
+ return;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ if (txq->sched_retry) {
+ const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
+ struct il_ht_agg *agg = NULL;
+ WARN_ON(!qc);
+
+ agg = &il->stations[sta_id].tid[tid].agg;
+
+ il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);
+
+ /* check if BAR is needed */
+ if (tx_resp->frame_count == 1 &&
+ !il4965_is_tx_success(status))
+ info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+
+ if (txq->q.read_ptr != (scd_ssn & 0xff)) {
+ idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
+ D_TX_REPLY("Retry scheduler reclaim scd_ssn "
+ "%d idx %d\n", scd_ssn, idx);
+ freed = il4965_tx_queue_reclaim(il, txq_id, idx);
+ if (qc)
+ il4965_free_tfds_in_queue(il, sta_id, tid,
+ freed);
+
+ if (il->mac80211_registered &&
+ il_queue_space(&txq->q) > txq->q.low_mark &&
+ agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
+ il_wake_queue(il, txq);
+ }
+ } else {
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ info->flags |= il4965_tx_status_to_mac80211(status);
+ il4965_hwrate_to_tx_control(il,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ info);
+
+ D_TX_REPLY("TXQ %d status %s (0x%08x) "
+ "rate_n_flags 0x%x retries %d\n", txq_id,
+ il4965_get_tx_fail_reason(status), status,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ tx_resp->failure_frame);
+
+ freed = il4965_tx_queue_reclaim(il, txq_id, idx);
+ if (qc && likely(sta_id != IL_INVALID_STATION))
+ il4965_free_tfds_in_queue(il, sta_id, tid, freed);
+ else if (sta_id == IL_INVALID_STATION)
+ D_TX_REPLY("Station not known\n");
+
+ if (il->mac80211_registered &&
+ il_queue_space(&txq->q) > txq->q.low_mark)
+ il_wake_queue(il, txq);
+ }
+ if (qc && likely(sta_id != IL_INVALID_STATION))
+ il4965_txq_check_empty(il, sta_id, tid, txq_id);
+
+ il4965_check_abort_status(il, tx_resp->frame_count, status);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
/**
* translate ucode response to mac80211 tx status control values
*/
@@ -2771,8 +3047,7 @@ il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
* Function sleeps.
*/
int
-il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
- const u8 *addr, u8 *sta_id_r)
+il4965_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r)
{
int ret;
u8 sta_id;
@@ -2782,7 +3057,7 @@ il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
if (sta_id_r)
*sta_id_r = IL_INVALID_STATION;
- ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
+ ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
if (ret) {
IL_ERR("Unable to add station %pM\n", addr);
return ret;
@@ -2803,7 +3078,7 @@ il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
return -ENOMEM;
}
- ret = il_send_lq_cmd(il, ctx, link_cmd, CMD_SYNC, true);
+ ret = il_send_lq_cmd(il, link_cmd, CMD_SYNC, true);
if (ret)
IL_ERR("Link quality command failed (%d)\n", ret);
@@ -2815,19 +3090,19 @@ il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
}
static int
-il4965_static_wepkey_cmd(struct il_priv *il, struct il_rxon_context *ctx,
- bool send_if_empty)
+il4965_static_wepkey_cmd(struct il_priv *il, bool send_if_empty)
{
- int i, not_empty = 0;
+ int i;
u8 buff[sizeof(struct il_wep_cmd) +
sizeof(struct il_wep_key) * WEP_KEYS_MAX];
struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
size_t cmd_size = sizeof(struct il_wep_cmd);
struct il_host_cmd cmd = {
- .id = ctx->wep_key_cmd,
+ .id = C_WEPKEY,
.data = wep_cmd,
.flags = CMD_SYNC,
};
+ bool not_empty = false;
might_sleep();
@@ -2835,24 +3110,23 @@ il4965_static_wepkey_cmd(struct il_priv *il, struct il_rxon_context *ctx,
cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));
for (i = 0; i < WEP_KEYS_MAX; i++) {
+ u8 key_size = il->_4965.wep_keys[i].key_size;
+
wep_cmd->key[i].key_idx = i;
- if (ctx->wep_keys[i].key_size) {
+ if (key_size) {
wep_cmd->key[i].key_offset = i;
- not_empty = 1;
- } else {
+ not_empty = true;
+ } else
wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
- }
- wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
- memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
- ctx->wep_keys[i].key_size);
+ wep_cmd->key[i].key_size = key_size;
+ memcpy(&wep_cmd->key[i].key[3], il->_4965.wep_keys[i].key, key_size);
}
wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
wep_cmd->num_keys = WEP_KEYS_MAX;
cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
-
cmd.len = cmd_size;
if (not_empty || send_if_empty)
@@ -2862,66 +3136,66 @@ il4965_static_wepkey_cmd(struct il_priv *il, struct il_rxon_context *ctx,
}
int
-il4965_restore_default_wep_keys(struct il_priv *il, struct il_rxon_context *ctx)
+il4965_restore_default_wep_keys(struct il_priv *il)
{
lockdep_assert_held(&il->mutex);
- return il4965_static_wepkey_cmd(il, ctx, false);
+ return il4965_static_wepkey_cmd(il, false);
}
int
-il4965_remove_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+il4965_remove_default_wep_key(struct il_priv *il,
struct ieee80211_key_conf *keyconf)
{
int ret;
+ int idx = keyconf->keyidx;
lockdep_assert_held(&il->mutex);
- D_WEP("Removing default WEP key: idx=%d\n", keyconf->keyidx);
+ D_WEP("Removing default WEP key: idx=%d\n", idx);
- memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
+ memset(&il->_4965.wep_keys[idx], 0, sizeof(struct il_wep_key));
if (il_is_rfkill(il)) {
D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
/* but keys in device are clear anyway so return success */
return 0;
}
- ret = il4965_static_wepkey_cmd(il, ctx, 1);
- D_WEP("Remove default WEP key: idx=%d ret=%d\n", keyconf->keyidx, ret);
+ ret = il4965_static_wepkey_cmd(il, 1);
+ D_WEP("Remove default WEP key: idx=%d ret=%d\n", idx, ret);
return ret;
}
int
-il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+il4965_set_default_wep_key(struct il_priv *il,
struct ieee80211_key_conf *keyconf)
{
int ret;
+ int len = keyconf->keylen;
+ int idx = keyconf->keyidx;
lockdep_assert_held(&il->mutex);
- if (keyconf->keylen != WEP_KEY_LEN_128 &&
- keyconf->keylen != WEP_KEY_LEN_64) {
+ if (len != WEP_KEY_LEN_128 && len != WEP_KEY_LEN_64) {
D_WEP("Bad WEP key length %d\n", keyconf->keylen);
return -EINVAL;
}
keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
keyconf->hw_key_idx = HW_KEY_DEFAULT;
- il->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[IL_AP_ID].keyinfo.cipher = keyconf->cipher;
- ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
- memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
- keyconf->keylen);
+ il->_4965.wep_keys[idx].key_size = len;
+ memcpy(&il->_4965.wep_keys[idx].key, &keyconf->key, len);
- ret = il4965_static_wepkey_cmd(il, ctx, false);
- D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", keyconf->keylen,
- keyconf->keyidx, ret);
+ ret = il4965_static_wepkey_cmd(il, false);
+ D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", len, idx, ret);
return ret;
}
static int
-il4965_set_wep_dynamic_key_info(struct il_priv *il, struct il_rxon_context *ctx,
+il4965_set_wep_dynamic_key_info(struct il_priv *il,
struct ieee80211_key_conf *keyconf, u8 sta_id)
{
unsigned long flags;
@@ -2939,7 +3213,7 @@ il4965_set_wep_dynamic_key_info(struct il_priv *il, struct il_rxon_context *ctx,
if (keyconf->keylen == WEP_KEY_LEN_128)
key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
- if (sta_id == ctx->bcast_sta_id)
+ if (sta_id == il->hw_params.bcast_id)
key_flags |= STA_KEY_MULTICAST_MSK;
spin_lock_irqsave(&il->sta_lock, flags);
@@ -2976,7 +3250,6 @@ il4965_set_wep_dynamic_key_info(struct il_priv *il, struct il_rxon_context *ctx,
static int
il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
- struct il_rxon_context *ctx,
struct ieee80211_key_conf *keyconf, u8 sta_id)
{
unsigned long flags;
@@ -2989,7 +3262,7 @@ il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
key_flags &= ~STA_KEY_FLG_INVALID;
- if (sta_id == ctx->bcast_sta_id)
+ if (sta_id == il->hw_params.bcast_id)
key_flags |= STA_KEY_MULTICAST_MSK;
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
@@ -3025,7 +3298,6 @@ il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
static int
il4965_set_tkip_dynamic_key_info(struct il_priv *il,
- struct il_rxon_context *ctx,
struct ieee80211_key_conf *keyconf, u8 sta_id)
{
unsigned long flags;
@@ -3036,7 +3308,7 @@ il4965_set_tkip_dynamic_key_info(struct il_priv *il,
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
key_flags &= ~STA_KEY_FLG_INVALID;
- if (sta_id == ctx->bcast_sta_id)
+ if (sta_id == il->hw_params.bcast_id)
key_flags |= STA_KEY_MULTICAST_MSK;
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
@@ -3070,9 +3342,8 @@ il4965_set_tkip_dynamic_key_info(struct il_priv *il,
}
void
-il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
+il4965_update_tkip_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
u8 sta_id;
unsigned long flags;
@@ -3084,7 +3355,7 @@ il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
return;
}
- sta_id = il_sta_id_or_broadcast(il, ctx, sta);
+ sta_id = il_sta_id_or_broadcast(il, sta);
if (sta_id == IL_INVALID_STATION)
return;
@@ -3102,11 +3373,10 @@ il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
spin_unlock_irqrestore(&il->sta_lock, flags);
-
}
int
-il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+il4965_remove_dynamic_key(struct il_priv *il,
struct ieee80211_key_conf *keyconf, u8 sta_id)
{
unsigned long flags;
@@ -3116,7 +3386,7 @@ il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
lockdep_assert_held(&il->mutex);
- ctx->key_mapping_keys--;
+ il->_4965.key_mapping_keys--;
spin_lock_irqsave(&il->sta_lock, flags);
key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
@@ -3167,28 +3437,28 @@ il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
}
int
-il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf, u8 sta_id)
+il4965_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
+ u8 sta_id)
{
int ret;
lockdep_assert_held(&il->mutex);
- ctx->key_mapping_keys++;
+ il->_4965.key_mapping_keys++;
keyconf->hw_key_idx = HW_KEY_DYNAMIC;
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
ret =
- il4965_set_ccmp_dynamic_key_info(il, ctx, keyconf, sta_id);
+ il4965_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
break;
case WLAN_CIPHER_SUITE_TKIP:
ret =
- il4965_set_tkip_dynamic_key_info(il, ctx, keyconf, sta_id);
+ il4965_set_tkip_dynamic_key_info(il, keyconf, sta_id);
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- ret = il4965_set_wep_dynamic_key_info(il, ctx, keyconf, sta_id);
+ ret = il4965_set_wep_dynamic_key_info(il, keyconf, sta_id);
break;
default:
IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
@@ -3210,14 +3480,14 @@ il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
* device at the next best time.
*/
int
-il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
+il4965_alloc_bcast_station(struct il_priv *il)
{
struct il_link_quality_cmd *link_cmd;
unsigned long flags;
u8 sta_id;
spin_lock_irqsave(&il->sta_lock, flags);
- sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
+ sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
if (sta_id == IL_INVALID_STATION) {
IL_ERR("Unable to prepare broadcast station\n");
spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -3250,11 +3520,11 @@ il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
* code together.
*/
static int
-il4965_update_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
+il4965_update_bcast_station(struct il_priv *il)
{
unsigned long flags;
struct il_link_quality_cmd *link_cmd;
- u8 sta_id = ctx->bcast_sta_id;
+ u8 sta_id = il->hw_params.bcast_id;
link_cmd = il4965_sta_alloc_lq(il, sta_id);
if (!link_cmd) {
@@ -3276,7 +3546,7 @@ il4965_update_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
int
il4965_update_bcast_stations(struct il_priv *il)
{
- return il4965_update_bcast_station(il, &il->ctx);
+ return il4965_update_bcast_station(il);
}
/**
@@ -3376,10 +3646,10 @@ il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
void
il4965_update_chain_flags(struct il_priv *il)
{
- if (il->cfg->ops->hcmd->set_rxon_chain) {
- il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
- if (il->ctx.active.rx_chain != il->ctx.staging.rx_chain)
- il_commit_rxon(il, &il->ctx);
+ if (il->ops->set_rxon_chain) {
+ il->ops->set_rxon_chain(il);
+ if (il->active.rx_chain != il->staging.rx_chain)
+ il_commit_rxon(il);
}
}
@@ -3491,8 +3761,8 @@ il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
lockdep_assert_held(&il->mutex);
- if (!il->beacon_ctx) {
- IL_ERR("trying to build beacon w/o beacon context!\n");
+ if (!il->beacon_enabled) {
+ IL_ERR("Trying to build beacon without beaconing enabled\n");
return 0;
}
@@ -3511,7 +3781,7 @@ il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
/* Set up TX command fields */
tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
- tx_beacon_cmd->tx.sta_id = il->beacon_ctx->bcast_sta_id;
+ tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
tx_beacon_cmd->tx.tx_flags =
TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
@@ -3522,7 +3792,7 @@ il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
frame_size);
/* Set up packet rate and flags */
- rate = il_get_lowest_plcp(il, il->beacon_ctx);
+ rate = il_get_lowest_plcp(il);
il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
@@ -3645,15 +3915,13 @@ il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
PCI_DMA_TODEVICE);
/* free SKB */
- if (txq->txb) {
- struct sk_buff *skb;
-
- skb = txq->txb[txq->q.read_ptr].skb;
+ if (txq->skbs) {
+ struct sk_buff *skb = txq->skbs[txq->q.read_ptr];
/* can be called from irqs-disabled context */
if (skb) {
dev_kfree_skb_any(skb);
- txq->txb[txq->q.read_ptr].skb = NULL;
+ txq->skbs[txq->q.read_ptr] = NULL;
}
}
}
@@ -3752,9 +4020,9 @@ il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
* This callback is provided in order to send a stats request.
*
* This timer function is continually reset to execute within
- * REG_RECALIB_PERIOD seconds since the last N_STATS
- * was received. We need to ensure we receive the stats in order
- * to update the temperature used for calibrating the TXPOWER.
+ * 60 seconds since the last N_STATS was received. We need to
+ * ensure we receive the stats in order to update the temperature
+ * used for calibrating the TXPOWER.
*/
static void
il4965_bg_stats_periodic(unsigned long data)
@@ -3804,7 +4072,7 @@ il4965_perform_ct_kill_task(struct il_priv *il)
_il_rd(il, CSR_UCODE_DRV_GP1);
spin_lock_irqsave(&il->reg_lock, flags);
- if (!_il_grab_nic_access(il))
+ if (likely(_il_grab_nic_access(il)))
_il_release_nic_access(il);
spin_unlock_irqrestore(&il->reg_lock, flags);
}
@@ -3842,17 +4110,17 @@ il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
il4965_perform_ct_kill_task(il);
if (flags & HW_CARD_DISABLED)
- set_bit(S_RF_KILL_HW, &il->status);
+ set_bit(S_RFKILL, &il->status);
else
- clear_bit(S_RF_KILL_HW, &il->status);
+ clear_bit(S_RFKILL, &il->status);
if (!(flags & RXON_CARD_DISABLED))
il_scan_cancel(il);
- if ((test_bit(S_RF_KILL_HW, &status) !=
- test_bit(S_RF_KILL_HW, &il->status)))
+ if ((test_bit(S_RFKILL, &status) !=
+ test_bit(S_RFKILL, &il->status)))
wiphy_rfkill_set_hw_state(il->hw->wiphy,
- test_bit(S_RF_KILL_HW, &il->status));
+ test_bit(S_RFKILL, &il->status));
else
wake_up(&il->wait_command_queue);
}
@@ -3894,10 +4162,11 @@ il4965_setup_handlers(struct il_priv *il)
/* Rx handlers */
il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
il->handlers[N_RX_MPDU] = il4965_hdl_rx;
+ il->handlers[N_RX] = il4965_hdl_rx;
/* block ack */
il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
- /* Set up hardware specific Rx handlers */
- il->cfg->ops->lib->handler_setup(il);
+ /* Tx response */
+ il->handlers[C_TX] = il4965_hdl_tx;
}
/**
@@ -4127,9 +4396,8 @@ il4965_irq_tasklet(struct il_priv *il)
/* HW RF KILL switch toggled */
if (inta & CSR_INT_BIT_RF_KILL) {
int hw_rf_kill = 0;
- if (!
- (_il_rd(il, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+
+ if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
hw_rf_kill = 1;
IL_WARN("RF_KILL bit toggled to %s.\n",
@@ -4144,9 +4412,9 @@ il4965_irq_tasklet(struct il_priv *il)
*/
if (!test_bit(S_ALIVE, &il->status)) {
if (hw_rf_kill)
- set_bit(S_RF_KILL_HW, &il->status);
+ set_bit(S_RFKILL, &il->status);
else
- clear_bit(S_RF_KILL_HW, &il->status);
+ clear_bit(S_RFKILL, &il->status);
wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
}
@@ -4270,11 +4538,9 @@ il4965_store_debug_level(struct device *d, struct device_attribute *attr,
ret = strict_strtoul(buf, 0, &val);
if (ret)
IL_ERR("%s is not in hex or decimal form.\n", buf);
- else {
+ else
il->debug_level = val;
- if (il_alloc_traffic_mem(il))
- IL_ERR("Not enough memory to generate traffic log\n");
- }
+
return strnlen(buf, count);
}
@@ -4799,7 +5065,7 @@ il4965_dump_nic_error_log(struct il_priv *il)
else
base = le32_to_cpu(il->card_alive.error_event_table_ptr);
- if (!il->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ if (!il->ops->is_valid_rtc_data_addr(base)) {
IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
return;
@@ -4979,7 +5245,6 @@ static void
il4965_alive_start(struct il_priv *il)
{
int ret = 0;
- struct il_rxon_context *ctx = &il->ctx;
D_INFO("Runtime Alive received.\n");
@@ -5019,18 +5284,18 @@ il4965_alive_start(struct il_priv *il)
il->active_rate = RATES_MASK;
- if (il_is_associated_ctx(ctx)) {
+ if (il_is_associated(il)) {
struct il_rxon_cmd *active_rxon =
- (struct il_rxon_cmd *)&ctx->active;
+ (struct il_rxon_cmd *)&il->active;
/* apply any changes in staging */
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
} else {
/* Initialize our rx_config data */
- il_connection_init_rx_config(il, &il->ctx);
+ il_connection_init_rx_config(il);
- if (il->cfg->ops->hcmd->set_rxon_chain)
- il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+ if (il->ops->set_rxon_chain)
+ il->ops->set_rxon_chain(il);
}
/* Configure bluetooth coexistence if enabled */
@@ -5041,7 +5306,7 @@ il4965_alive_start(struct il_priv *il)
set_bit(S_READY, &il->status);
/* Configure the adapter for unassociated operation */
- il_commit_rxon(il, ctx);
+ il_commit_rxon(il);
/* At this point, the NIC is initialized and operational */
il4965_rf_kill_ct_config(il);
@@ -5076,7 +5341,21 @@ __il4965_down(struct il_priv *il)
* to prevent rearm timer */
del_timer_sync(&il->watchdog);
- il_clear_ucode_stations(il, NULL);
+ il_clear_ucode_stations(il);
+
+ /* FIXME: race conditions ? */
+ spin_lock_irq(&il->sta_lock);
+ /*
+ * Remove all key information that is not stored as part
+ * of station information, since mac80211 may not have had
+ * a chance to remove all the keys. When the device is
+ * reconfigured by mac80211 after an error, all keys will
+ * be reconfigured.
+ */
+ memset(il->_4965.wep_keys, 0, sizeof(il->_4965.wep_keys));
+ il->_4965.key_mapping_keys = 0;
+ spin_unlock_irq(&il->sta_lock);
+
il_dealloc_bcast_stations(il);
il_clear_driver_stations(il);
@@ -5104,12 +5383,8 @@ __il4965_down(struct il_priv *il)
* clear all bits but the RF Kill bit and return */
if (!il_is_init(il)) {
il->status =
- test_bit(S_RF_KILL_HW,
- &il->
- status) << S_RF_KILL_HW |
- test_bit(S_GEO_CONFIGURED,
- &il->
- status) << S_GEO_CONFIGURED |
+ test_bit(S_RFKILL, &il->status) << S_RFKILL |
+ test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
goto exit;
}
@@ -5117,28 +5392,32 @@ __il4965_down(struct il_priv *il)
/* ...otherwise clear out all the status bits but the RF Kill
* bit and continue taking the NIC down. */
il->status &=
- test_bit(S_RF_KILL_HW,
- &il->status) << S_RF_KILL_HW | test_bit(S_GEO_CONFIGURED,
- &il->
- status) <<
- S_GEO_CONFIGURED | test_bit(S_FW_ERROR,
- &il->
- status) << S_FW_ERROR |
+ test_bit(S_RFKILL, &il->status) << S_RFKILL |
+ test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+ test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+ /*
+ * Interrupts are disabled and synchronized and priv->mutex is held, so
+ * this is the only thread that will program device registers. Lockdep
+ * assertions still apply, though, so take reg_lock here.
+ */
+ spin_lock_irq(&il->reg_lock);
+ /* FIXME: il_grab_nic_access if rfkill is off ? */
+
il4965_txq_ctx_stop(il);
il4965_rxq_stop(il);
-
/* Power-down device's busmaster DMA clocks */
- il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ _il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
udelay(5);
-
/* Make sure (redundant) we've released our request to stay awake */
- il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
+ _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
- il_apm_stop(il);
+ _il_apm_stop(il);
+
+ spin_unlock_irq(&il->reg_lock);
+ il4965_txq_ctx_unmap(il);
exit:
memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
@@ -5159,40 +5438,36 @@ il4965_down(struct il_priv *il)
il4965_cancel_deferred_work(il);
}
-#define HW_READY_TIMEOUT (50)
-static int
+static void
il4965_set_hw_ready(struct il_priv *il)
{
- int ret = 0;
+ int ret;
il_set_bit(il, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
/* See if we got it */
- ret =
- _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT);
- if (ret != -ETIMEDOUT)
+ ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ 100);
+ if (ret >= 0)
il->hw_ready = true;
- else
- il->hw_ready = false;
- D_INFO("hardware %s\n", (il->hw_ready == 1) ? "ready" : "not ready");
- return ret;
+ D_INFO("hardware %s ready\n", (il->hw_ready) ? "" : "not");
}
-static int
+static void
il4965_prepare_card_hw(struct il_priv *il)
{
- int ret = 0;
+ int ret;
- D_INFO("il4965_prepare_card_hw enter\n");
+ il->hw_ready = false;
- ret = il4965_set_hw_ready(il);
+ il4965_set_hw_ready(il);
if (il->hw_ready)
- return ret;
+ return;
/* If HW is not ready, prepare the conditions to check again */
il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
@@ -5205,8 +5480,6 @@ il4965_prepare_card_hw(struct il_priv *il)
/* HW should be ready by now, check again. */
if (ret != -ETIMEDOUT)
il4965_set_hw_ready(il);
-
- return ret;
}
#define MAX_HW_RESTARTS 5
@@ -5227,29 +5500,26 @@ __il4965_up(struct il_priv *il)
return -EIO;
}
- ret = il4965_alloc_bcast_station(il, &il->ctx);
+ ret = il4965_alloc_bcast_station(il);
if (ret) {
il_dealloc_bcast_stations(il);
return ret;
}
il4965_prepare_card_hw(il);
-
if (!il->hw_ready) {
- IL_WARN("Exit HW not ready\n");
+ IL_ERR("HW not ready\n");
return -EIO;
}
/* If platform's RF_KILL switch is NOT set to KILL */
if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(S_RF_KILL_HW, &il->status);
- else
- set_bit(S_RF_KILL_HW, &il->status);
-
- if (il_is_rfkill(il)) {
+ clear_bit(S_RFKILL, &il->status);
+ else {
+ set_bit(S_RFKILL, &il->status);
wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
- il_enable_interrupts(il);
+ il_enable_rfkill_int(il);
IL_WARN("Radio disabled by HW RF Kill switch\n");
return 0;
}
@@ -5288,7 +5558,7 @@ __il4965_up(struct il_priv *il)
/* load bootstrap state machine,
* load bootstrap program into processor's memory,
* prepare to load the "initialize" uCode */
- ret = il->cfg->ops->lib->load_ucode(il);
+ ret = il->ops->load_ucode(il);
if (ret) {
IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
@@ -5329,7 +5599,7 @@ il4965_bg_init_alive_start(struct work_struct *data)
if (test_bit(S_EXIT_PENDING, &il->status))
goto out;
- il->cfg->ops->lib->init_alive_start(il);
+ il->ops->init_alive_start(il);
out:
mutex_unlock(&il->mutex);
}
@@ -5381,7 +5651,8 @@ il4965_bg_restart(struct work_struct *data)
if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
mutex_lock(&il->mutex);
- il->ctx.vif = NULL;
+ /* FIXME: do we dereference vif without mutex locked ? */
+ il->vif = NULL;
il->is_open = 0;
__il4965_down(il);
@@ -5450,8 +5721,8 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
hw->sta_data_size = sizeof(struct il_station_priv);
hw->vif_data_size = sizeof(struct il_vif_priv);
- hw->wiphy->interface_modes |= il->ctx.interface_modes;
- hw->wiphy->interface_modes |= il->ctx.exclusive_interface_modes;
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->flags |=
WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS;
@@ -5578,12 +5849,10 @@ il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
{
struct il_priv *il = hw->priv;
- struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
D_MAC80211("enter\n");
- il4965_update_tkip_key(il, vif_priv->ctx, keyconf, sta, iv32,
- phase1key);
+ il4965_update_tkip_key(il, keyconf, sta, iv32, phase1key);
D_MAC80211("leave\n");
}
@@ -5594,8 +5863,6 @@ il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct il_priv *il = hw->priv;
- struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct il_rxon_context *ctx = vif_priv->ctx;
int ret;
u8 sta_id;
bool is_default_wep_key = false;
@@ -5607,7 +5874,7 @@ il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- sta_id = il_sta_id_or_broadcast(il, vif_priv->ctx, sta);
+ sta_id = il_sta_id_or_broadcast(il, sta);
if (sta_id == IL_INVALID_STATION)
return -EINVAL;
@@ -5623,7 +5890,7 @@ il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
if (cmd == SET_KEY)
- is_default_wep_key = !ctx->key_mapping_keys;
+ is_default_wep_key = !il->_4965.key_mapping_keys;
else
is_default_wep_key =
(key->hw_key_idx == HW_KEY_DEFAULT);
@@ -5632,20 +5899,17 @@ il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
if (is_default_wep_key)
- ret =
- il4965_set_default_wep_key(il, vif_priv->ctx, key);
+ ret = il4965_set_default_wep_key(il, key);
else
- ret =
- il4965_set_dynamic_key(il, vif_priv->ctx, key,
- sta_id);
+ ret = il4965_set_dynamic_key(il, key, sta_id);
D_MAC80211("enable hwcrypto key\n");
break;
case DISABLE_KEY:
if (is_default_wep_key)
- ret = il4965_remove_default_wep_key(il, ctx, key);
+ ret = il4965_remove_default_wep_key(il, key);
else
- ret = il4965_remove_dynamic_key(il, ctx, key, sta_id);
+ ret = il4965_remove_dynamic_key(il, key, sta_id);
D_MAC80211("disable hwcrypto key\n");
break;
@@ -5711,7 +5975,6 @@ il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct il_priv *il = hw->priv;
struct il_station_priv *sta_priv = (void *)sta->drv_priv;
- struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
bool is_ap = vif->type == NL80211_IFTYPE_STATION;
int ret;
u8 sta_id;
@@ -5724,8 +5987,7 @@ il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
atomic_set(&sta_priv->pending_frames, 0);
ret =
- il_add_station_common(il, vif_priv->ctx, sta->addr, is_ap, sta,
- &sta_id);
+ il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
if (ret) {
IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
/* Should we return success if return code is EEXIST ? */
@@ -5752,8 +6014,6 @@ il4965_mac_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_conf *conf = &hw->conf;
struct ieee80211_channel *channel = ch_switch->channel;
struct il_ht_config *ht_conf = &il->current_ht_config;
-
- struct il_rxon_context *ctx = &il->ctx;
u16 ch;
D_MAC80211("enter\n");
@@ -5768,14 +6028,14 @@ il4965_mac_channel_switch(struct ieee80211_hw *hw,
test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
goto out;
- if (!il_is_associated_ctx(ctx))
+ if (!il_is_associated(il))
goto out;
- if (!il->cfg->ops->lib->set_channel_switch)
+ if (!il->ops->set_channel_switch)
goto out;
ch = channel->hw_value;
- if (le16_to_cpu(ctx->active.channel) == ch)
+ if (le16_to_cpu(il->active.channel) == ch)
goto out;
ch_info = il_get_channel_info(il, channel->band, ch);
@@ -5789,30 +6049,30 @@ il4965_mac_channel_switch(struct ieee80211_hw *hw,
il->current_ht_config.smps = conf->smps_mode;
/* Configure HT40 channels */
- ctx->ht.enabled = conf_is_ht(conf);
- if (ctx->ht.enabled) {
+ il->ht.enabled = conf_is_ht(conf);
+ if (il->ht.enabled) {
if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
+ il->ht.extension_chan_offset =
IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
+ il->ht.is_40mhz = true;
} else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
+ il->ht.extension_chan_offset =
IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
+ il->ht.is_40mhz = true;
} else {
- ctx->ht.extension_chan_offset =
+ il->ht.extension_chan_offset =
IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
+ il->ht.is_40mhz = false;
}
} else
- ctx->ht.is_40mhz = false;
+ il->ht.is_40mhz = false;
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
+ if ((le16_to_cpu(il->staging.channel) != ch))
+ il->staging.flags = 0;
- il_set_rxon_channel(il, channel, ctx);
+ il_set_rxon_channel(il, channel);
il_set_rxon_ht(il, ht_conf);
- il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
+ il_set_flags_for_band(il, channel->band, il->vif);
spin_unlock_irq(&il->lock);
@@ -5823,10 +6083,10 @@ il4965_mac_channel_switch(struct ieee80211_hw *hw,
*/
set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
il->switch_channel = cpu_to_le16(ch);
- if (il->cfg->ops->lib->set_channel_switch(il, ch_switch)) {
+ if (il->ops->set_channel_switch(il, ch_switch)) {
clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
il->switch_channel = 0;
- ieee80211_chswitch_done(ctx->vif, false);
+ ieee80211_chswitch_done(il->vif, false);
}
out:
@@ -5860,8 +6120,8 @@ il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
mutex_lock(&il->mutex);
- il->ctx.staging.filter_flags &= ~filter_nand;
- il->ctx.staging.filter_flags |= filter_or;
+ il->staging.filter_flags &= ~filter_nand;
+ il->staging.filter_flags |= filter_or;
/*
* Not committing directly because hardware can perform a scan,
@@ -5906,7 +6166,7 @@ il4965_bg_txpower_work(struct work_struct *work)
/* Regardless of if we are associated, we must reconfigure the
* TX power since frames can be sent on non-radar channels while
* not associated */
- il->cfg->ops->lib->send_tx_power(il);
+ il->ops->send_tx_power(il);
/* Update last_temperature to keep is_calib_needed from running
* when it isn't needed... */
@@ -6012,6 +6272,28 @@ il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
+const struct ieee80211_ops il4965_mac_ops = {
+ .tx = il4965_mac_tx,
+ .start = il4965_mac_start,
+ .stop = il4965_mac_stop,
+ .add_interface = il_mac_add_interface,
+ .remove_interface = il_mac_remove_interface,
+ .change_interface = il_mac_change_interface,
+ .config = il_mac_config,
+ .configure_filter = il4965_configure_filter,
+ .set_key = il4965_mac_set_key,
+ .update_tkip_key = il4965_mac_update_tkip_key,
+ .conf_tx = il_mac_conf_tx,
+ .reset_tsf = il_mac_reset_tsf,
+ .bss_info_changed = il_mac_bss_info_changed,
+ .ampdu_action = il4965_mac_ampdu_action,
+ .hw_scan = il_mac_hw_scan,
+ .sta_add = il4965_mac_sta_add,
+ .sta_remove = il_mac_sta_remove,
+ .channel_switch = il4965_mac_channel_switch,
+ .tx_last_beacon = il_mac_tx_last_beacon,
+};
+
static int
il4965_init_drv(struct il_priv *il)
{
@@ -6036,8 +6318,8 @@ il4965_init_drv(struct il_priv *il)
il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
/* Choose which receivers/antennas to use */
- if (il->cfg->ops->hcmd->set_rxon_chain)
- il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+ if (il->ops->set_rxon_chain)
+ il->ops->set_rxon_chain(il);
il_init_scan_params(il);
@@ -6065,7 +6347,6 @@ err:
static void
il4965_uninit_drv(struct il_priv *il)
{
- il4965_calib_free_results(il);
il_free_geos(il);
il_free_channel_map(il);
kfree(il->scan_cmd);
@@ -6080,9 +6361,37 @@ il4965_hw_detect(struct il_priv *il)
D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
}
-static int
+static struct il_sensitivity_ranges il4965_sensitivity = {
+ .min_nrg_cck = 97,
+ .max_nrg_cck = 0, /* not used, set to 0 */
+
+ .auto_corr_min_ofdm = 85,
+ .auto_corr_min_ofdm_mrc = 170,
+ .auto_corr_min_ofdm_x1 = 105,
+ .auto_corr_min_ofdm_mrc_x1 = 220,
+
+ .auto_corr_max_ofdm = 120,
+ .auto_corr_max_ofdm_mrc = 210,
+ .auto_corr_max_ofdm_x1 = 140,
+ .auto_corr_max_ofdm_mrc_x1 = 270,
+
+ .auto_corr_min_cck = 125,
+ .auto_corr_max_cck = 200,
+ .auto_corr_min_cck_mrc = 200,
+ .auto_corr_max_cck_mrc = 400,
+
+ .nrg_th_cck = 100,
+ .nrg_th_ofdm = 100,
+
+ .barker_corr_th_min = 190,
+ .barker_corr_th_min_mrc = 390,
+ .nrg_th_cca = 62,
+};
+
+static void
il4965_set_hw_params(struct il_priv *il)
{
+ il->hw_params.bcast_id = IL4965_BROADCAST_ID;
il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
if (il->cfg->mod_params->amsdu_size_8K)
@@ -6095,20 +6404,37 @@ il4965_set_hw_params(struct il_priv *il)
if (il->cfg->mod_params->disable_11n)
il->cfg->sku &= ~IL_SKU_N;
- /* Device-specific setup */
- return il->cfg->ops->lib->set_hw_params(il);
-}
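+ /* Use the num_of_queues module parameter only if it is within the 4965's supported range */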
+ if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
+ il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
+ il->cfg->num_of_queues =
+ il->cfg->mod_params->num_of_queues;
-static const u8 il4965_bss_ac_to_fifo[] = {
- IL_TX_FIFO_VO,
- IL_TX_FIFO_VI,
- IL_TX_FIFO_BE,
- IL_TX_FIFO_BK,
-};
+ il->hw_params.max_txq_num = il->cfg->num_of_queues;
+ il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
+ il->hw_params.scd_bc_tbls_size =
+ il->cfg->num_of_queues *
+ sizeof(struct il4965_scd_bc_tbl);
-static const u8 il4965_bss_ac_to_queue[] = {
- 0, 1, 2, 3,
-};
+ il->hw_params.tfd_size = sizeof(struct il_tfd);
+ il->hw_params.max_stations = IL4965_STATION_COUNT;
+ il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
+ il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
+ il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
+ il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
+
+ il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
+
+ il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
+ il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
+ il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
+ il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;
+
+ il->hw_params.ct_kill_threshold =
+ CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
+
+ il->hw_params.sens = &il4965_sensitivity;
+ il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
+}
static int
il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -6124,43 +6450,24 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* 1. Allocating HW data
************************/
- hw = il_alloc_all(cfg);
+ hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il4965_mac_ops);
if (!hw) {
err = -ENOMEM;
goto out;
}
il = hw->priv;
- /* At this point both hw and il are allocated. */
-
- il->ctx.ctxid = 0;
-
- il->ctx.always_active = true;
- il->ctx.is_active = true;
- il->ctx.rxon_cmd = C_RXON;
- il->ctx.rxon_timing_cmd = C_RXON_TIMING;
- il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
- il->ctx.qos_cmd = C_QOS_PARAM;
- il->ctx.ap_sta_id = IL_AP_ID;
- il->ctx.wep_key_cmd = C_WEPKEY;
- il->ctx.ac_to_fifo = il4965_bss_ac_to_fifo;
- il->ctx.ac_to_queue = il4965_bss_ac_to_queue;
- il->ctx.exclusive_interface_modes = BIT(NL80211_IFTYPE_ADHOC);
- il->ctx.interface_modes = BIT(NL80211_IFTYPE_STATION);
- il->ctx.ap_devtype = RXON_DEV_TYPE_AP;
- il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
- il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
- il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
-
+ il->hw = hw;
SET_IEEE80211_DEV(hw, &pdev->dev);
D_INFO("*** LOAD DRIVER ***\n");
il->cfg = cfg;
+ il->ops = &il4965_ops;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ il->debugfs_ops = &il4965_debugfs_ops;
+#endif
il->pci_dev = pdev;
il->inta_mask = CSR_INI_SET_MASK;
- if (il_alloc_traffic_mem(il))
- IL_ERR("Not enough memory to generate traffic log\n");
-
/**************************
* 2. Initializing PCI bus
**************************/
@@ -6199,7 +6506,7 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/***********************
* 3. Read REV register
***********************/
- il->hw_base = pci_iomap(pdev, 0, 0);
+ il->hw_base = pci_ioremap_bar(pdev, 0);
if (!il->hw_base) {
err = -ENODEV;
goto out_pci_release_regions;
@@ -6260,10 +6567,7 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/************************
* 5. Setup HW constants
************************/
- if (il4965_set_hw_params(il)) {
- IL_ERR("failed to set hw parameters\n");
- goto out_free_eeprom;
- }
+ il4965_set_hw_params(il);
/*******************
* 6. Setup il
@@ -6307,12 +6611,12 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* If platform's RF_KILL switch is NOT set to KILL */
if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(S_RF_KILL_HW, &il->status);
+ clear_bit(S_RFKILL, &il->status);
else
- set_bit(S_RF_KILL_HW, &il->status);
+ set_bit(S_RFKILL, &il->status);
wiphy_rfkill_set_hw_state(il->hw->wiphy,
- test_bit(S_RF_KILL_HW, &il->status));
+ test_bit(S_RFKILL, &il->status));
il_power_initialize(il);
@@ -6334,14 +6638,13 @@ out_disable_msi:
out_free_eeprom:
il_eeprom_free(il);
out_iounmap:
- pci_iounmap(pdev, il->hw_base);
+ iounmap(il->hw_base);
out_pci_release_regions:
pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
out_pci_disable_device:
pci_disable_device(pdev);
out_ieee80211_free_hw:
- il_free_traffic_mem(il);
ieee80211_free_hw(il->hw);
out:
return err;
@@ -6412,11 +6715,10 @@ il4965_pci_remove(struct pci_dev *pdev)
* until now... */
destroy_workqueue(il->workqueue);
il->workqueue = NULL;
- il_free_traffic_mem(il);
free_irq(il->pci_dev->irq, il);
pci_disable_msi(il->pci_dev);
- pci_iounmap(pdev, il->hw_base);
+ iounmap(il->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 467d0cb14ecd..d7e2856e41d3 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -641,13 +641,10 @@ il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
* there are no non-GF stations present in the BSS.
*/
static bool
-il4965_rs_use_green(struct ieee80211_sta *sta)
+il4965_rs_use_green(struct il_priv *il, struct ieee80211_sta *sta)
{
- struct il_station_priv *sta_priv = (void *)sta->drv_priv;
- struct il_rxon_context *ctx = sta_priv->common.ctx;
-
return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
- !(ctx->ht.non_gf_sta_present);
+ !il->ht.non_gf_sta_present;
}
/**
@@ -823,8 +820,6 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
u32 tx_rate;
struct il_scale_tbl_info tbl_type;
struct il_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
- struct il_station_priv *sta_priv = (void *)sta->drv_priv;
- struct il_rxon_context *ctx = sta_priv->common.ctx;
D_RATE("get frame ack response, update rate scale win\n");
@@ -892,7 +887,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
lq_sta->missed_rate_counter++;
if (lq_sta->missed_rate_counter > IL_MISSED_RATE_MAX) {
lq_sta->missed_rate_counter = 0;
- il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+ il_send_lq_cmd(il, &lq_sta->lq, CMD_ASYNC, false);
}
/* Regardless, ignore this status info for outdated rate */
return;
@@ -1184,8 +1179,6 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
u16 rate_mask;
s32 rate;
s8 is_green = lq_sta->is_green;
- struct il_station_priv *sta_priv = (void *)sta->drv_priv;
- struct il_rxon_context *ctx = sta_priv->common.ctx;
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
@@ -1206,7 +1199,7 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
tbl->max_search = IL_MAX_SEARCH;
rate_mask = lq_sta->active_mimo2_rate;
- if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+ if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
@@ -1240,8 +1233,6 @@ il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
u16 rate_mask;
u8 is_green = lq_sta->is_green;
s32 rate;
- struct il_station_priv *sta_priv = (void *)sta->drv_priv;
- struct il_rxon_context *ctx = sta_priv->common.ctx;
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
@@ -1254,7 +1245,7 @@ il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
tbl->max_search = IL_MAX_SEARCH;
rate_mask = lq_sta->active_siso_rate;
- if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+ if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
@@ -1733,8 +1724,7 @@ il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
* setup rate table in uCode
*/
static void
-il4965_rs_update_rate_tbl(struct il_priv *il, struct il_rxon_context *ctx,
- struct il_lq_sta *lq_sta,
+il4965_rs_update_rate_tbl(struct il_priv *il, struct il_lq_sta *lq_sta,
struct il_scale_tbl_info *tbl, int idx, u8 is_green)
{
u32 rate;
@@ -1742,7 +1732,7 @@ il4965_rs_update_rate_tbl(struct il_priv *il, struct il_rxon_context *ctx,
/* Update uCode's rate table. */
rate = il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
il4965_rs_fill_link_cmd(il, lq_sta, rate);
- il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+ il_send_lq_cmd(il, &lq_sta->lq, CMD_ASYNC, false);
}
/*
@@ -1778,8 +1768,6 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
s32 sr;
u8 tid = MAX_TID_COUNT;
struct il_tid_data *tid_data;
- struct il_station_priv *sta_priv = (void *)sta->drv_priv;
- struct il_rxon_context *ctx = sta_priv->common.ctx;
D_RATE("rate scale calculate new rate for skb\n");
@@ -1815,7 +1803,7 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
if (is_legacy(tbl->lq_type))
lq_sta->is_green = 0;
else
- lq_sta->is_green = il4965_rs_use_green(sta);
+ lq_sta->is_green = il4965_rs_use_green(il, sta);
is_green = lq_sta->is_green;
/* current tx rate */
@@ -1854,7 +1842,7 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
/* get "active" rate info */
idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
- il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
+ il4965_rs_update_rate_tbl(il, lq_sta, tbl, idx,
is_green);
}
return;
@@ -2057,8 +2045,7 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
lq_update:
/* Replace uCode's rate table for the destination station. */
if (update_lq)
- il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
- is_green);
+ il4965_rs_update_rate_tbl(il, lq_sta, tbl, idx, is_green);
/* Should we stay with this modulation mode,
* or search for a new one? */
@@ -2098,7 +2085,7 @@ lq_update:
D_RATE("Switch current mcs: %X idx: %d\n",
tbl->current_rate, idx);
il4965_rs_fill_link_cmd(il, lq_sta, tbl->current_rate);
- il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+ il_send_lq_cmd(il, &lq_sta->lq, CMD_ASYNC, false);
} else
done_search = 1;
}
@@ -2166,17 +2153,15 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
int rate_idx;
int i;
u32 rate;
- u8 use_green = il4965_rs_use_green(sta);
+ u8 use_green = il4965_rs_use_green(il, sta);
u8 active_tbl = 0;
u8 valid_tx_ant;
struct il_station_priv *sta_priv;
- struct il_rxon_context *ctx;
if (!sta || !lq_sta)
return;
sta_priv = (void *)sta->drv_priv;
- ctx = sta_priv->common.ctx;
i = lq_sta->last_txrate_idx;
@@ -2208,7 +2193,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
il4965_rs_set_expected_tpt_table(lq_sta, tbl);
il4965_rs_fill_link_cmd(NULL, lq_sta, rate);
il->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
- il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_SYNC, true);
+ il_send_lq_cmd(il, &lq_sta->lq, CMD_SYNC, true);
}
static void
@@ -2341,7 +2326,7 @@ il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
lq_sta->is_dup = 0;
lq_sta->max_rate_idx = -1;
lq_sta->missed_rate_counter = IL_MISSED_RATE_MAX;
- lq_sta->is_green = il4965_rs_use_green(sta);
+ lq_sta->is_green = il4965_rs_use_green(il, sta);
lq_sta->active_legacy_rate = il->active_rate & ~(0x1000);
lq_sta->band = il->band;
/*
@@ -2579,9 +2564,6 @@ il4965_rs_sta_dbgfs_scale_table_write(struct file *file,
char buf[64];
size_t buf_size;
u32 parsed_rate;
- struct il_station_priv *sta_priv =
- container_of(lq_sta, struct il_station_priv, lq_sta);
- struct il_rxon_context *ctx = sta_priv->common.ctx;
il = lq_sta->drv;
memset(buf, 0, sizeof(buf));
@@ -2603,7 +2585,7 @@ il4965_rs_sta_dbgfs_scale_table_write(struct file *file,
if (lq_sta->dbg_fixed_rate) {
il4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
- il_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, false);
+ il_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
}
return count;
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
index cacbc03880b0..5db11714e047 100644
--- a/drivers/net/wireless/iwlegacy/4965.c
+++ b/drivers/net/wireless/iwlegacy/4965.c
@@ -264,10 +264,6 @@ il4965_led_enable(struct il_priv *il)
_il_wr(il, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
}
-const struct il_led_ops il4965_led_ops = {
- .cmd = il4965_send_led_cmd,
-};
-
static int il4965_send_tx_power(struct il_priv *il);
static int il4965_hw_get_temperature(struct il_priv *il);
@@ -508,7 +504,7 @@ iw4965_is_ht40_channel(__le32 rxon_flags)
chan_mod == CHANNEL_MODE_MIXED);
}
-static void
+void
il4965_nic_config(struct il_priv *il)
{
unsigned long flags;
@@ -569,82 +565,6 @@ il4965_chain_noise_reset(struct il_priv *il)
}
}
-static struct il_sensitivity_ranges il4965_sensitivity = {
- .min_nrg_cck = 97,
- .max_nrg_cck = 0, /* not used, set to 0 */
-
- .auto_corr_min_ofdm = 85,
- .auto_corr_min_ofdm_mrc = 170,
- .auto_corr_min_ofdm_x1 = 105,
- .auto_corr_min_ofdm_mrc_x1 = 220,
-
- .auto_corr_max_ofdm = 120,
- .auto_corr_max_ofdm_mrc = 210,
- .auto_corr_max_ofdm_x1 = 140,
- .auto_corr_max_ofdm_mrc_x1 = 270,
-
- .auto_corr_min_cck = 125,
- .auto_corr_max_cck = 200,
- .auto_corr_min_cck_mrc = 200,
- .auto_corr_max_cck_mrc = 400,
-
- .nrg_th_cck = 100,
- .nrg_th_ofdm = 100,
-
- .barker_corr_th_min = 190,
- .barker_corr_th_min_mrc = 390,
- .nrg_th_cca = 62,
-};
-
-static void
-il4965_set_ct_threshold(struct il_priv *il)
-{
- /* want Kelvin */
- il->hw_params.ct_kill_threshold =
- CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
-}
-
-/**
- * il4965_hw_set_hw_params
- *
- * Called when initializing driver
- */
-static int
-il4965_hw_set_hw_params(struct il_priv *il)
-{
- if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
- il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
- il->cfg->base_params->num_of_queues =
- il->cfg->mod_params->num_of_queues;
-
- il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
- il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
- il->hw_params.scd_bc_tbls_size =
- il->cfg->base_params->num_of_queues *
- sizeof(struct il4965_scd_bc_tbl);
- il->hw_params.tfd_size = sizeof(struct il_tfd);
- il->hw_params.max_stations = IL4965_STATION_COUNT;
- il->ctx.bcast_sta_id = IL4965_BROADCAST_ID;
- il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
- il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
- il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
- il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
-
- il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
-
- il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
- il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
- il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
- il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;
-
- il4965_set_ct_threshold(il);
-
- il->hw_params.sens = &il4965_sensitivity;
- il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
-
- return 0;
-}
-
static s32
il4965_math_div_round(s32 num, s32 denom, s32 * res)
{
@@ -1342,7 +1262,6 @@ il4965_send_tx_power(struct il_priv *il)
u8 band = 0;
bool is_ht40 = false;
u8 ctrl_chan_high = 0;
- struct il_rxon_context *ctx = &il->ctx;
if (WARN_ONCE
(test_bit(S_SCAN_HW, &il->status),
@@ -1351,16 +1270,16 @@ il4965_send_tx_power(struct il_priv *il)
band = il->band == IEEE80211_BAND_2GHZ;
- is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
+ is_ht40 = iw4965_is_ht40_channel(il->active.flags);
- if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+ if (is_ht40 && (il->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
ctrl_chan_high = 1;
cmd.band = band;
- cmd.channel = ctx->active.channel;
+ cmd.channel = il->active.channel;
ret =
- il4965_fill_txpower_tbl(il, band, le16_to_cpu(ctx->active.channel),
+ il4965_fill_txpower_tbl(il, band, le16_to_cpu(il->active.channel),
is_ht40, ctrl_chan_high, &cmd.tx_power);
if (ret)
goto out;
@@ -1372,12 +1291,12 @@ out:
}
static int
-il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+il4965_send_rxon_assoc(struct il_priv *il)
{
int ret = 0;
struct il4965_rxon_assoc_cmd rxon_assoc;
- const struct il_rxon_cmd *rxon1 = &ctx->staging;
- const struct il_rxon_cmd *rxon2 = &ctx->active;
+ const struct il_rxon_cmd *rxon1 = &il->staging;
+ const struct il_rxon_cmd *rxon2 = &il->active;
if (rxon1->flags == rxon2->flags &&
rxon1->filter_flags == rxon2->filter_flags &&
@@ -1392,16 +1311,16 @@ il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
return 0;
}
- rxon_assoc.flags = ctx->staging.flags;
- rxon_assoc.filter_flags = ctx->staging.filter_flags;
- rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
- rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+ rxon_assoc.flags = il->staging.flags;
+ rxon_assoc.filter_flags = il->staging.filter_flags;
+ rxon_assoc.ofdm_basic_rates = il->staging.ofdm_basic_rates;
+ rxon_assoc.cck_basic_rates = il->staging.cck_basic_rates;
rxon_assoc.reserved = 0;
rxon_assoc.ofdm_ht_single_stream_basic_rates =
- ctx->staging.ofdm_ht_single_stream_basic_rates;
+ il->staging.ofdm_ht_single_stream_basic_rates;
rxon_assoc.ofdm_ht_dual_stream_basic_rates =
- ctx->staging.ofdm_ht_dual_stream_basic_rates;
- rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
+ il->staging.ofdm_ht_dual_stream_basic_rates;
+ rxon_assoc.rx_chain_select_flags = il->staging.rx_chain;
ret =
il_send_cmd_pdu_async(il, C_RXON_ASSOC, sizeof(rxon_assoc),
@@ -1411,23 +1330,20 @@ il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
}
static int
-il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+il4965_commit_rxon(struct il_priv *il)
{
/* cast away the const for active_rxon in this function */
- struct il_rxon_cmd *active_rxon = (void *)&ctx->active;
+ struct il_rxon_cmd *active_rxon = (void *)&il->active;
int ret;
- bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
+ bool new_assoc = !!(il->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
if (!il_is_alive(il))
return -EBUSY;
- if (!ctx->is_active)
- return 0;
-
/* always get timestamp with Rx frame */
- ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
+ il->staging.flags |= RXON_FLG_TSF2HOST_MSK;
- ret = il_check_rxon_cmd(il, ctx);
+ ret = il_check_rxon_cmd(il);
if (ret) {
IL_ERR("Invalid RXON configuration. Not committing.\n");
return -EINVAL;
@@ -1438,7 +1354,7 @@ il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
* abort any previous channel switch if still in process
*/
if (test_bit(S_CHANNEL_SWITCH_PENDING, &il->status) &&
- il->switch_channel != ctx->staging.channel) {
+ il->switch_channel != il->staging.channel) {
D_11H("abort channel switch on %d\n",
le16_to_cpu(il->switch_channel));
il_chswitch_done(il, false);
@@ -1447,15 +1363,15 @@ il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
/* If we don't need to send a full RXON, we can use
* il_rxon_assoc_cmd which is used to reconfigure filter
* and other flags for the current radio configuration. */
- if (!il_full_rxon_required(il, ctx)) {
- ret = il_send_rxon_assoc(il, ctx);
+ if (!il_full_rxon_required(il)) {
+ ret = il_send_rxon_assoc(il);
if (ret) {
IL_ERR("Error setting RXON_ASSOC (%d)\n", ret);
return ret;
}
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- il_print_rx_config_cmd(il, ctx);
+ memcpy(active_rxon, &il->staging, sizeof(*active_rxon));
+ il_print_rx_config_cmd(il);
/*
* We do not commit tx power settings while channel changing,
* do it now if tx power changed.
@@ -1468,12 +1384,12 @@ il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
* an RXON_ASSOC and the new config wants the associated mask enabled,
* we must clear the associated from the active configuration
* before we apply the new config */
- if (il_is_associated_ctx(ctx) && new_assoc) {
+ if (il_is_associated(il) && new_assoc) {
D_INFO("Toggling associated bit on current RXON\n");
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
ret =
- il_send_cmd_pdu(il, ctx->rxon_cmd,
+ il_send_cmd_pdu(il, C_RXON,
sizeof(struct il_rxon_cmd), active_rxon);
/* If the mask clearing failed then we set
@@ -1483,9 +1399,9 @@ il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
IL_ERR("Error clearing ASSOC_MSK (%d)\n", ret);
return ret;
}
- il_clear_ucode_stations(il, ctx);
- il_restore_stations(il, ctx);
- ret = il4965_restore_default_wep_keys(il, ctx);
+ il_clear_ucode_stations(il);
+ il_restore_stations(il);
+ ret = il4965_restore_default_wep_keys(il);
if (ret) {
IL_ERR("Failed to restore WEP keys (%d)\n", ret);
return ret;
@@ -1494,9 +1410,9 @@ il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
"* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
- le16_to_cpu(ctx->staging.channel), ctx->staging.bssid_addr);
+ le16_to_cpu(il->staging.channel), il->staging.bssid_addr);
- il_set_rxon_hwcrypto(il, ctx, !il->cfg->mod_params->sw_crypto);
+ il_set_rxon_hwcrypto(il, !il->cfg->mod_params->sw_crypto);
/* Apply the new configuration
* RXON unassoc clears the station table in uCode so restoration of
@@ -1504,17 +1420,17 @@ il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
*/
if (!new_assoc) {
ret =
- il_send_cmd_pdu(il, ctx->rxon_cmd,
- sizeof(struct il_rxon_cmd), &ctx->staging);
+ il_send_cmd_pdu(il, C_RXON,
+ sizeof(struct il_rxon_cmd), &il->staging);
if (ret) {
IL_ERR("Error setting new RXON (%d)\n", ret);
return ret;
}
D_INFO("Return from !new_assoc RXON.\n");
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- il_clear_ucode_stations(il, ctx);
- il_restore_stations(il, ctx);
- ret = il4965_restore_default_wep_keys(il, ctx);
+ memcpy(active_rxon, &il->staging, sizeof(*active_rxon));
+ il_clear_ucode_stations(il);
+ il_restore_stations(il);
+ ret = il4965_restore_default_wep_keys(il);
if (ret) {
IL_ERR("Failed to restore WEP keys (%d)\n", ret);
return ret;
@@ -1526,15 +1442,15 @@ il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
* RXON assoc doesn't clear the station table in uCode,
*/
ret =
- il_send_cmd_pdu(il, ctx->rxon_cmd,
- sizeof(struct il_rxon_cmd), &ctx->staging);
+ il_send_cmd_pdu(il, C_RXON,
+ sizeof(struct il_rxon_cmd), &il->staging);
if (ret) {
IL_ERR("Error setting new RXON (%d)\n", ret);
return ret;
}
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+ memcpy(active_rxon, &il->staging, sizeof(*active_rxon));
}
- il_print_rx_config_cmd(il, ctx);
+ il_print_rx_config_cmd(il);
il4965_init_sensitivity(il);
@@ -1553,7 +1469,6 @@ static int
il4965_hw_channel_switch(struct il_priv *il,
struct ieee80211_channel_switch *ch_switch)
{
- struct il_rxon_context *ctx = &il->ctx;
int rc;
u8 band = 0;
bool is_ht40 = false;
@@ -1564,21 +1479,24 @@ il4965_hw_channel_switch(struct il_priv *il,
u16 ch;
u32 tsf_low;
u8 switch_count;
- u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
- struct ieee80211_vif *vif = ctx->vif;
- band = il->band == IEEE80211_BAND_2GHZ;
+ u16 beacon_interval = le16_to_cpu(il->timing.beacon_interval);
+ struct ieee80211_vif *vif = il->vif;
+ band = (il->band == IEEE80211_BAND_2GHZ);
+
+ if (WARN_ON_ONCE(vif == NULL))
+ return -EIO;
- is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
+ is_ht40 = iw4965_is_ht40_channel(il->staging.flags);
- if (is_ht40 && (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+ if (is_ht40 && (il->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
ctrl_chan_high = 1;
cmd.band = band;
cmd.expect_beacon = 0;
ch = ch_switch->channel->hw_value;
cmd.channel = cpu_to_le16(ch);
- cmd.rxon_flags = ctx->staging.flags;
- cmd.rxon_filter_flags = ctx->staging.filter_flags;
+ cmd.rxon_flags = il->staging.flags;
+ cmd.rxon_filter_flags = il->staging.filter_flags;
switch_count = ch_switch->count;
tsf_low = ch_switch->timestamp & 0x0ffffffff;
/*
@@ -1611,7 +1529,7 @@ il4965_hw_channel_switch(struct il_priv *il,
cmd.expect_beacon = il_is_channel_radar(ch_info);
else {
IL_ERR("invalid channel switch from %u to %u\n",
- ctx->active.channel, ch);
+ il->active.channel, ch);
return -EFAULT;
}
@@ -1756,7 +1674,7 @@ il4965_is_temp_calib_needed(struct il_priv *il)
return 1;
}
-static void
+void
il4965_temperature_calib(struct il_priv *il)
{
s32 temp;
@@ -1815,339 +1733,21 @@ il4965_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
return (u16) sizeof(struct il4965_addsta_cmd);
}
-static inline u32
-il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
-{
- return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
-}
-
-static inline u32
-il4965_tx_status_to_mac80211(u32 status)
-{
- status &= TX_STATUS_MSK;
-
- switch (status) {
- case TX_STATUS_SUCCESS:
- case TX_STATUS_DIRECT_DONE:
- return IEEE80211_TX_STAT_ACK;
- case TX_STATUS_FAIL_DEST_PS:
- return IEEE80211_TX_STAT_TX_FILTERED;
- default:
- return 0;
- }
-}
-
-static inline bool
-il4965_is_tx_success(u32 status)
-{
- status &= TX_STATUS_MSK;
- return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
-}
-
-/**
- * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
- */
-static int
-il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
- struct il4965_tx_resp *tx_resp, int txq_id,
- u16 start_idx)
-{
- u16 status;
- struct agg_tx_status *frame_status = tx_resp->u.agg_status;
- struct ieee80211_tx_info *info = NULL;
- struct ieee80211_hdr *hdr = NULL;
- u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
- int i, sh, idx;
- u16 seq;
- if (agg->wait_for_ba)
- D_TX_REPLY("got tx response w/o block-ack\n");
-
- agg->frame_count = tx_resp->frame_count;
- agg->start_idx = start_idx;
- agg->rate_n_flags = rate_n_flags;
- agg->bitmap = 0;
-
- /* num frames attempted by Tx command */
- if (agg->frame_count == 1) {
- /* Only one frame was attempted; no block-ack will arrive */
- status = le16_to_cpu(frame_status[0].status);
- idx = start_idx;
-
- D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
- agg->frame_count, agg->start_idx, idx);
-
- info = IEEE80211_SKB_CB(il->txq[txq_id].txb[idx].skb);
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- info->flags |= il4965_tx_status_to_mac80211(status);
- il4965_hwrate_to_tx_control(il, rate_n_flags, info);
-
- D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
- tx_resp->failure_frame);
- D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
-
- agg->wait_for_ba = 0;
- } else {
- /* Two or more frames were attempted; expect block-ack */
- u64 bitmap = 0;
- int start = agg->start_idx;
-
- /* Construct bit-map of pending frames within Tx win */
- for (i = 0; i < agg->frame_count; i++) {
- u16 sc;
- status = le16_to_cpu(frame_status[i].status);
- seq = le16_to_cpu(frame_status[i].sequence);
- idx = SEQ_TO_IDX(seq);
- txq_id = SEQ_TO_QUEUE(seq);
-
- if (status &
- (AGG_TX_STATE_FEW_BYTES_MSK |
- AGG_TX_STATE_ABORT_MSK))
- continue;
-
- D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
- agg->frame_count, txq_id, idx);
-
- hdr = il_tx_queue_get_hdr(il, txq_id, idx);
- if (!hdr) {
- IL_ERR("BUG_ON idx doesn't point to valid skb"
- " idx=%d, txq_id=%d\n", idx, txq_id);
- return -1;
- }
-
- sc = le16_to_cpu(hdr->seq_ctrl);
- if (idx != (SEQ_TO_SN(sc) & 0xff)) {
- IL_ERR("BUG_ON idx doesn't match seq control"
- " idx=%d, seq_idx=%d, seq=%d\n", idx,
- SEQ_TO_SN(sc), hdr->seq_ctrl);
- return -1;
- }
-
- D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
- SEQ_TO_SN(sc));
-
- sh = idx - start;
- if (sh > 64) {
- sh = (start - idx) + 0xff;
- bitmap = bitmap << sh;
- sh = 0;
- start = idx;
- } else if (sh < -64)
- sh = 0xff - (start - idx);
- else if (sh < 0) {
- sh = start - idx;
- start = idx;
- bitmap = bitmap << sh;
- sh = 0;
- }
- bitmap |= 1ULL << sh;
- D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
- (unsigned long long)bitmap);
- }
-
- agg->bitmap = bitmap;
- agg->start_idx = start;
- D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
- agg->frame_count, agg->start_idx,
- (unsigned long long)agg->bitmap);
-
- if (bitmap)
- agg->wait_for_ba = 1;
- }
- return 0;
-}
-
-static u8
-il4965_find_station(struct il_priv *il, const u8 * addr)
-{
- int i;
- int start = 0;
- int ret = IL_INVALID_STATION;
- unsigned long flags;
-
- if ((il->iw_mode == NL80211_IFTYPE_ADHOC))
- start = IL_STA_ID;
-
- if (is_broadcast_ether_addr(addr))
- return il->ctx.bcast_sta_id;
-
- spin_lock_irqsave(&il->sta_lock, flags);
- for (i = start; i < il->hw_params.max_stations; i++)
- if (il->stations[i].used &&
- (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) {
- ret = i;
- goto out;
- }
-
- D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);
-
-out:
- /*
- * It may be possible that more commands interacting with stations
- * arrive before we completed processing the adding of
- * station
- */
- if (ret != IL_INVALID_STATION &&
- (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
- ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
- (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
- IL_ERR("Requested station info for sta %d before ready.\n",
- ret);
- ret = IL_INVALID_STATION;
- }
- spin_unlock_irqrestore(&il->sta_lock, flags);
- return ret;
-}
-
-static int
-il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
-{
- if (il->iw_mode == NL80211_IFTYPE_STATION) {
- return IL_AP_ID;
- } else {
- u8 *da = ieee80211_get_DA(hdr);
- return il4965_find_station(il, da);
- }
-}
-
-/**
- * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
- */
-static void
-il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
-{
- struct il_rx_pkt *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int idx = SEQ_TO_IDX(sequence);
- struct il_tx_queue *txq = &il->txq[txq_id];
- struct ieee80211_hdr *hdr;
- struct ieee80211_tx_info *info;
- struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
- u32 status = le32_to_cpu(tx_resp->u.status);
- int uninitialized_var(tid);
- int sta_id;
- int freed;
- u8 *qc = NULL;
- unsigned long flags;
-
- if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
- IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
- "is out of range [0-%d] %d %d\n", txq_id, idx,
- txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
- return;
- }
-
- txq->time_stamp = jiffies;
- info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
- memset(&info->status, 0, sizeof(info->status));
-
- hdr = il_tx_queue_get_hdr(il, txq_id, idx);
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
- }
-
- sta_id = il4965_get_ra_sta_id(il, hdr);
- if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
- IL_ERR("Station not known\n");
- return;
- }
-
- spin_lock_irqsave(&il->sta_lock, flags);
- if (txq->sched_retry) {
- const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
- struct il_ht_agg *agg = NULL;
- WARN_ON(!qc);
-
- agg = &il->stations[sta_id].tid[tid].agg;
-
- il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);
-
- /* check if BAR is needed */
- if ((tx_resp->frame_count == 1) &&
- !il4965_is_tx_success(status))
- info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-
- if (txq->q.read_ptr != (scd_ssn & 0xff)) {
- idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
- D_TX_REPLY("Retry scheduler reclaim scd_ssn "
- "%d idx %d\n", scd_ssn, idx);
- freed = il4965_tx_queue_reclaim(il, txq_id, idx);
- if (qc)
- il4965_free_tfds_in_queue(il, sta_id, tid,
- freed);
-
- if (il->mac80211_registered &&
- il_queue_space(&txq->q) > txq->q.low_mark &&
- agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
- il_wake_queue(il, txq);
- }
- } else {
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags |= il4965_tx_status_to_mac80211(status);
- il4965_hwrate_to_tx_control(il,
- le32_to_cpu(tx_resp->rate_n_flags),
- info);
-
- D_TX_REPLY("TXQ %d status %s (0x%08x) "
- "rate_n_flags 0x%x retries %d\n", txq_id,
- il4965_get_tx_fail_reason(status), status,
- le32_to_cpu(tx_resp->rate_n_flags),
- tx_resp->failure_frame);
-
- freed = il4965_tx_queue_reclaim(il, txq_id, idx);
- if (qc && likely(sta_id != IL_INVALID_STATION))
- il4965_free_tfds_in_queue(il, sta_id, tid, freed);
- else if (sta_id == IL_INVALID_STATION)
- D_TX_REPLY("Station not known\n");
-
- if (il->mac80211_registered &&
- il_queue_space(&txq->q) > txq->q.low_mark)
- il_wake_queue(il, txq);
- }
- if (qc && likely(sta_id != IL_INVALID_STATION))
- il4965_txq_check_empty(il, sta_id, tid, txq_id);
-
- il4965_check_abort_status(il, tx_resp->frame_count, status);
-
- spin_unlock_irqrestore(&il->sta_lock, flags);
-}
-
-/* Set up 4965-specific Rx frame reply handlers */
-static void
-il4965_handler_setup(struct il_priv *il)
-{
- /* Legacy Rx frames */
- il->handlers[N_RX] = il4965_hdl_rx;
- /* Tx response */
- il->handlers[C_TX] = il4965_hdl_tx;
-}
-
-static struct il_hcmd_ops il4965_hcmd = {
- .rxon_assoc = il4965_send_rxon_assoc,
- .commit_rxon = il4965_commit_rxon,
- .set_rxon_chain = il4965_set_rxon_chain,
-};
-
static void
il4965_post_scan(struct il_priv *il)
{
- struct il_rxon_context *ctx = &il->ctx;
-
/*
* Since setting the RXON may have been deferred while
* performing the scan, fire one off if needed
*/
- if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
- il_commit_rxon(il, ctx);
+ if (memcmp(&il->staging, &il->active, sizeof(il->staging)))
+ il_commit_rxon(il);
}
static void
il4965_post_associate(struct il_priv *il)
{
- struct il_rxon_context *ctx = &il->ctx;
- struct ieee80211_vif *vif = ctx->vif;
+ struct ieee80211_vif *vif = il->vif;
struct ieee80211_conf *conf = NULL;
int ret = 0;
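
The large block removed above includes il4965_tx_status_reply_tx(), whose most intricate part is the sliding 64-bit bitmap it builds over an aggregation window: frame indices live in an 8-bit sequence ring, and when a completion lands left of the current window start the bitmap is shifted and the window re-anchored. A simplified standalone model of that bitmap construction (it ignores the ring-wrap and +/-64 clamping the driver also handles):

#include <stdint.h>
#include <stdio.h>

/*
 * Build a 64-bit "pending frames" bitmap over a window of up to 64
 * aggregated frames, sliding the window start down when a frame index
 * turns up left of it.
 */
static uint64_t agg_bitmap(const int *idx, int count, int start)
{
        uint64_t bitmap = 0;
        int i, sh;

        for (i = 0; i < count; i++) {
                sh = idx[i] - start;
                if (sh < 0) {           /* frame is left of the window start */
                        bitmap <<= -sh; /* slide the window down to it */
                        start = idx[i];
                        sh = 0;
                }
                bitmap |= 1ULL << sh;
        }
        return bitmap;
}

int main(void)
{
        int frames[] = { 12, 13, 15, 10 };      /* out-of-order completions */

        printf("bitmap 0x%llx\n",
               (unsigned long long)agg_bitmap(frames, 4, 12));
        return 0;
}

With this input the window re-anchors at index 10 and the result is 0x2d, i.e. frames 10, 12, 13 and 15 marked pending.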
@@ -2161,41 +1761,41 @@ il4965_post_associate(struct il_priv *il)
conf = &il->hw->conf;
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- il_commit_rxon(il, ctx);
+ il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il);
- ret = il_send_rxon_timing(il, ctx);
+ ret = il_send_rxon_timing(il);
if (ret)
IL_WARN("RXON timing - " "Attempting to continue.\n");
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
il_set_rxon_ht(il, &il->current_ht_config);
- if (il->cfg->ops->hcmd->set_rxon_chain)
- il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+ if (il->ops->set_rxon_chain)
+ il->ops->set_rxon_chain(il);
- ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+ il->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid,
vif->bss_conf.beacon_int);
if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
}
- il_commit_rxon(il, ctx);
+ il_commit_rxon(il);
D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid,
- ctx->active.bssid_addr);
+ il->active.bssid_addr);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
@@ -2223,8 +1823,7 @@ il4965_post_associate(struct il_priv *il)
static void
il4965_config_ap(struct il_priv *il)
{
- struct il_rxon_context *ctx = &il->ctx;
- struct ieee80211_vif *vif = ctx->vif;
+ struct ieee80211_vif *vif = il->vif;
int ret = 0;
lockdep_assert_held(&il->mutex);
@@ -2233,14 +1832,14 @@ il4965_config_ap(struct il_priv *il)
return;
/* The following should be done only at AP bring up */
- if (!il_is_associated_ctx(ctx)) {
+ if (!il_is_associated(il)) {
/* RXON - unassoc (to set timing command) */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- il_commit_rxon(il, ctx);
+ il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il);
/* RXON Timing */
- ret = il_send_rxon_timing(il, ctx);
+ ret = il_send_rxon_timing(il);
if (ret)
IL_WARN("RXON timing failed - "
"Attempting to continue.\n");
@@ -2248,133 +1847,63 @@ il4965_config_ap(struct il_priv *il)
/* AP has all antennas */
il->chain_noise_data.active_chains = il->hw_params.valid_rx_ant;
il_set_rxon_ht(il, &il->current_ht_config);
- if (il->cfg->ops->hcmd->set_rxon_chain)
- il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+ if (il->ops->set_rxon_chain)
+ il->ops->set_rxon_chain(il);
- ctx->staging.assoc_id = 0;
+ il->staging.assoc_id = 0;
if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
}
/* need to send beacon cmd before committing assoc RXON! */
il4965_send_beacon_cmd(il);
/* restore RXON assoc */
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- il_commit_rxon(il, ctx);
+ il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il);
}
il4965_send_beacon_cmd(il);
}
-static struct il_hcmd_utils_ops il4965_hcmd_utils = {
- .get_hcmd_size = il4965_get_hcmd_size,
- .build_addsta_hcmd = il4965_build_addsta_hcmd,
- .request_scan = il4965_request_scan,
- .post_scan = il4965_post_scan,
-};
-
-static struct il_lib_ops il4965_lib = {
- .set_hw_params = il4965_hw_set_hw_params,
+const struct il_ops il4965_ops = {
.txq_update_byte_cnt_tbl = il4965_txq_update_byte_cnt_tbl,
.txq_attach_buf_to_tfd = il4965_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = il4965_hw_txq_free_tfd,
.txq_init = il4965_hw_tx_queue_init,
- .handler_setup = il4965_handler_setup,
.is_valid_rtc_data_addr = il4965_hw_valid_rtc_data_addr,
.init_alive_start = il4965_init_alive_start,
.load_ucode = il4965_load_bsm,
.dump_nic_error_log = il4965_dump_nic_error_log,
.dump_fh = il4965_dump_fh,
.set_channel_switch = il4965_hw_channel_switch,
- .apm_ops = {
- .init = il_apm_init,
- .config = il4965_nic_config,
- },
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REGULATORY_BAND_1_CHANNELS,
- EEPROM_REGULATORY_BAND_2_CHANNELS,
- EEPROM_REGULATORY_BAND_3_CHANNELS,
- EEPROM_REGULATORY_BAND_4_CHANNELS,
- EEPROM_REGULATORY_BAND_5_CHANNELS,
- EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
- EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS},
- .acquire_semaphore = il4965_eeprom_acquire_semaphore,
- .release_semaphore = il4965_eeprom_release_semaphore,
- },
+ .apm_init = il_apm_init,
.send_tx_power = il4965_send_tx_power,
.update_chain_flags = il4965_update_chain_flags,
- .temp_ops = {
- .temperature = il4965_temperature_calib,
- },
-#ifdef CONFIG_IWLEGACY_DEBUGFS
- .debugfs_ops = {
- .rx_stats_read = il4965_ucode_rx_stats_read,
- .tx_stats_read = il4965_ucode_tx_stats_read,
- .general_stats_read = il4965_ucode_general_stats_read,
- },
-#endif
-};
+ .eeprom_acquire_semaphore = il4965_eeprom_acquire_semaphore,
+ .eeprom_release_semaphore = il4965_eeprom_release_semaphore,
+
+ .rxon_assoc = il4965_send_rxon_assoc,
+ .commit_rxon = il4965_commit_rxon,
+ .set_rxon_chain = il4965_set_rxon_chain,
+
+ .get_hcmd_size = il4965_get_hcmd_size,
+ .build_addsta_hcmd = il4965_build_addsta_hcmd,
+ .request_scan = il4965_request_scan,
+ .post_scan = il4965_post_scan,
-static const struct il_legacy_ops il4965_legacy_ops = {
.post_associate = il4965_post_associate,
.config_ap = il4965_config_ap,
.manage_ibss_station = il4965_manage_ibss_station,
.update_bcast_stations = il4965_update_bcast_stations,
-};
-struct ieee80211_ops il4965_hw_ops = {
- .tx = il4965_mac_tx,
- .start = il4965_mac_start,
- .stop = il4965_mac_stop,
- .add_interface = il_mac_add_interface,
- .remove_interface = il_mac_remove_interface,
- .change_interface = il_mac_change_interface,
- .config = il_mac_config,
- .configure_filter = il4965_configure_filter,
- .set_key = il4965_mac_set_key,
- .update_tkip_key = il4965_mac_update_tkip_key,
- .conf_tx = il_mac_conf_tx,
- .reset_tsf = il_mac_reset_tsf,
- .bss_info_changed = il_mac_bss_info_changed,
- .ampdu_action = il4965_mac_ampdu_action,
- .hw_scan = il_mac_hw_scan,
- .sta_add = il4965_mac_sta_add,
- .sta_remove = il_mac_sta_remove,
- .channel_switch = il4965_mac_channel_switch,
- .tx_last_beacon = il_mac_tx_last_beacon,
-};
-
-static const struct il_ops il4965_ops = {
- .lib = &il4965_lib,
- .hcmd = &il4965_hcmd,
- .utils = &il4965_hcmd_utils,
- .led = &il4965_led_ops,
- .legacy = &il4965_legacy_ops,
- .ieee80211_ops = &il4965_hw_ops,
-};
-
-static struct il_base_params il4965_base_params = {
- .eeprom_size = IL4965_EEPROM_IMG_SIZE,
- .num_of_queues = IL49_NUM_QUEUES,
- .num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES,
- .pll_cfg_val = 0,
- .set_l0s = true,
- .use_bsm = true,
- .led_compensation = 61,
- .chain_noise_num_beacons = IL4965_CAL_NUM_BEACONS,
- .wd_timeout = IL_DEF_WD_TIMEOUT,
- .temperature_kelvin = true,
- .ucode_tracing = true,
- .sensitivity_calib_by_driver = true,
- .chain_noise_calib_by_driver = true,
+ .send_led_cmd = il4965_send_led_cmd,
};
struct il_cfg il4965_cfg = {
@@ -2387,15 +1916,38 @@ struct il_cfg il4965_cfg = {
.valid_rx_ant = ANT_ABC,
.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
- .ops = &il4965_ops,
.mod_params = &il4965_mod_params,
- .base_params = &il4965_base_params,
.led_mode = IL_LED_BLINK,
/*
* Force use of chains B and C for scan RX on 5 GHz band
* because the device has off-channel reception on chain A.
*/
.scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
+
+ .eeprom_size = IL4965_EEPROM_IMG_SIZE,
+ .num_of_queues = IL49_NUM_QUEUES,
+ .num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = true,
+ .led_compensation = 61,
+ .chain_noise_num_beacons = IL4965_CAL_NUM_BEACONS,
+ .wd_timeout = IL_DEF_WD_TIMEOUT,
+ .temperature_kelvin = true,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+
+ .regulatory_bands = {
+ EEPROM_REGULATORY_BAND_1_CHANNELS,
+ EEPROM_REGULATORY_BAND_2_CHANNELS,
+ EEPROM_REGULATORY_BAND_3_CHANNELS,
+ EEPROM_REGULATORY_BAND_4_CHANNELS,
+ EEPROM_REGULATORY_BAND_5_CHANNELS,
+ EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
+ EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
+ },
+
};
/* Module firmware */
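
Taken together, the 4965.c hunks above replace the nested il_lib_ops / il_hcmd_ops / il_hcmd_utils_ops / il_legacy_ops tables with the single flat il4965_ops, and fold the old il_base_params fields and EEPROM band offsets straight into il4965_cfg. Call sites shrink from il->cfg->ops->hcmd->set_rxon_chain(il, ctx) to il->ops->set_rxon_chain(il), still guarded by a NULL check for optional hooks. A small sketch of that flat-ops pattern, using made-up hook and type names rather than the driver's:

#include <stdio.h>

struct device;

/* One flat table of hooks; optional ones may be left NULL. */
struct dev_ops {
        int  (*commit_config)(struct device *dev);
        void (*set_chain)(struct device *dev);  /* optional */
};

struct device {
        const struct dev_ops *ops;
        int config;
};

static int demo_commit(struct device *dev)
{
        printf("committing config %d\n", dev->config);
        return 0;
}

static const struct dev_ops demo_ops = {
        .commit_config = demo_commit,
        /* .set_chain intentionally left NULL */
};

int main(void)
{
        struct device dev = { .ops = &demo_ops, .config = 42 };

        if (dev.ops->set_chain)         /* optional hook, like il->ops->set_rxon_chain */
                dev.ops->set_chain(&dev);

        return dev.ops->commit_config(&dev);
}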
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index f280e0161b17..1db677689cfe 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -38,17 +38,16 @@ struct il_rxon_context;
/* configuration for the _4965 devices */
extern struct il_cfg il4965_cfg;
+extern const struct il_ops il4965_ops;
extern struct il_mod_params il4965_mod_params;
-extern struct ieee80211_ops il4965_hw_ops;
-
/* tx queue */
void il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid,
int freed);
/* RXON */
-void il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx);
+void il4965_set_rxon_chain(struct il_priv *il);
/* uCode */
int il4965_verify_ucode(struct il_priv *il);
@@ -61,6 +60,8 @@ int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq);
int il4965_hw_nic_init(struct il_priv *il);
int il4965_dump_fh(struct il_priv *il, char **buf, bool display);
+void il4965_nic_config(struct il_priv *il);
+
/* rx */
void il4965_rx_queue_restock(struct il_priv *il);
void il4965_rx_replenish(struct il_priv *il);
@@ -68,8 +69,6 @@ void il4965_rx_replenish_now(struct il_priv *il);
void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
int il4965_rxq_stop(struct il_priv *il);
int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
-void il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb);
-void il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb);
void il4965_rx_handle(struct il_priv *il);
/* tx */
@@ -85,7 +84,6 @@ int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
int il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id);
-void il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb);
int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx);
void il4965_hw_txq_ctx_free(struct il_priv *il);
int il4965_txq_ctx_alloc(struct il_priv *il);
@@ -107,12 +105,6 @@ void il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx);
void il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
int tx_fifo_id, int scd_retry);
-/* rx */
-void il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb);
-bool il4965_good_plcp_health(struct il_priv *il, struct il_rx_pkt *pkt);
-void il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
-void il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
-
/* scan */
int il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
@@ -134,21 +126,18 @@ il4965_get_tx_fail_reason(u32 status)
#endif
/* station management */
-int il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx);
-int il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
- const u8 *addr, u8 *sta_id_r);
+int il4965_alloc_bcast_station(struct il_priv *il);
+int il4965_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r);
int il4965_remove_default_wep_key(struct il_priv *il,
- struct il_rxon_context *ctx,
struct ieee80211_key_conf *key);
-int il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+int il4965_set_default_wep_key(struct il_priv *il,
struct ieee80211_key_conf *key);
-int il4965_restore_default_wep_keys(struct il_priv *il,
- struct il_rxon_context *ctx);
-int il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+int il4965_restore_default_wep_keys(struct il_priv *il);
+int il4965_set_dynamic_key(struct il_priv *il,
struct ieee80211_key_conf *key, u8 sta_id);
-int il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+int il4965_remove_dynamic_key(struct il_priv *il,
struct ieee80211_key_conf *key, u8 sta_id);
-void il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
+void il4965_update_tkip_key(struct il_priv *il,
struct ieee80211_key_conf *keyconf,
struct ieee80211_sta *sta, u32 iv32,
u16 *phase1key);
@@ -279,6 +268,7 @@ il4965_hw_valid_rtc_data_addr(u32 addr)
((t) < IL_TX_POWER_TEMPERATURE_MIN || \
(t) > IL_TX_POWER_TEMPERATURE_MAX)
+extern void il4965_temperature_calib(struct il_priv *il);
/********************* END TEMPERATURE ***************************************/
/********************* START TXPOWER *****************************************/
@@ -937,17 +927,10 @@ void il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp);
void il4965_sensitivity_calibration(struct il_priv *il, void *resp);
void il4965_init_sensitivity(struct il_priv *il);
void il4965_reset_run_time_calib(struct il_priv *il);
-void il4965_calib_free_results(struct il_priv *il);
/* Debug */
#ifdef CONFIG_IWLEGACY_DEBUGFS
-ssize_t il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t il4965_ucode_general_stats_read(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos);
+extern const struct il_debugfs_ops il4965_debugfs_ops;
#endif
/****************************/
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
index 05bd375cb845..fb919727b8bb 100644
--- a/drivers/net/wireless/iwlegacy/Kconfig
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -6,45 +6,6 @@ config IWLEGACY
select LEDS_TRIGGERS
select MAC80211_LEDS
-menu "Debugging Options"
- depends on IWLEGACY
-
-config IWLEGACY_DEBUG
- bool "Enable full debugging output in iwlegacy (iwl 3945/4965) drivers"
- depends on IWLEGACY
- ---help---
- This option will enable debug tracing output for the iwlegacy
- drivers.
-
- This will result in the kernel module being ~100k larger. You can
- control which debug output is sent to the kernel log by setting the
- value in
-
- /sys/class/net/wlan0/device/debug_level
-
- This entry will only exist if this option is enabled.
-
- To set a value, simply echo an 8-byte hex value to the same file:
-
- % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
-
- You can find the list of debug mask values in:
- drivers/net/wireless/iwlegacy/common.h
-
- If this is your first time using this driver, you should say Y here
- as the debug information can assist others in helping you resolve
- any problems you may encounter.
-
-config IWLEGACY_DEBUGFS
- bool "iwlegacy (iwl 3945/4965) debugfs support"
- depends on IWLEGACY && MAC80211_DEBUGFS
- ---help---
- Enable creation of debugfs files for the iwlegacy drivers. This
- is a low-impact option that allows getting insight into the
- driver's state at runtime.
-
-endmenu
-
config IWL4965
tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
depends on PCI && MAC80211
@@ -98,3 +59,42 @@ config IWL3945
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.txt>. The
module will be called iwl3945.
+
+menu "iwl3945 / iwl4965 Debugging Options"
+ depends on IWLEGACY
+
+config IWLEGACY_DEBUG
+ bool "Enable full debugging output in iwlegacy (iwl 3945/4965) drivers"
+ depends on IWLEGACY
+ ---help---
+ This option will enable debug tracing output for the iwlegacy
+ drivers.
+
+ This will result in the kernel module being ~100k larger. You can
+ control which debug output is sent to the kernel log by setting the
+ value in
+
+ /sys/class/net/wlan0/device/debug_level
+
+ This entry will only exist if this option is enabled.
+
+ To set a value, simply echo an 8-byte hex value to the same file:
+
+ % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
+
+ You can find the list of debug mask values in:
+ drivers/net/wireless/iwlegacy/common.h
+
+ If this is your first time using this driver, you should say Y here
+ as the debug information can assist others in helping you resolve
+ any problems you may encounter.
+
+config IWLEGACY_DEBUGFS
+ bool "iwlegacy (iwl 3945/4965) debugfs support"
+ depends on IWLEGACY && MAC80211_DEBUGFS
+ ---help---
+ Enable creation of debugfs files for the iwlegacy drivers. This
+ is a low-impact option that allows getting insight into the
+ driver's state at runtime.
+
+endmenu
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 36454d0bbeed..e5ac04739bcc 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -81,7 +81,7 @@ il_clear_bit(struct il_priv *p, u32 r, u32 m)
}
EXPORT_SYMBOL(il_clear_bit);
-int
+bool
_il_grab_nic_access(struct il_priv *il)
{
int ret;
@@ -111,14 +111,15 @@ _il_grab_nic_access(struct il_priv *il)
_il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
(CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
val = _il_rd(il, CSR_GP_CNTRL);
- IL_ERR("MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
+ WARN_ONCE(1, "Timeout waiting for ucode processor access "
+ "(CSR_GP_CNTRL 0x%08x)\n", val);
_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
- return -EIO;
+ return false;
}
- return 0;
+ return true;
}
EXPORT_SYMBOL_GPL(_il_grab_nic_access);
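
_il_grab_nic_access() now reports success as a bool instead of 0/-EIO, which is why the callers below flip from if (!_il_grab_nic_access(il)) to if (likely(_il_grab_nic_access(il))). A small sketch of the resulting acquire/use/release shape around a register write, with stubs standing in for the wake-up handshake and the CSR accessors:

#include <stdbool.h>
#include <stdio.h>

static bool nic_awake = true;           /* pretend wake-up always succeeds */
static unsigned int fake_reg;

/* Stand-ins for _il_grab_nic_access() / _il_release_nic_access(). */
static bool grab_nic_access(void)
{
        if (!nic_awake) {
                fprintf(stderr, "timeout waiting for NIC access\n");
                return false;
        }
        return true;
}

static void release_nic_access(void)
{
        /* the driver clears its access-request bit here; nothing to do in the sketch */
}

/* Mirrors the il_wr_prph() shape: only touch the register while access is held. */
static void wr_prph(unsigned int val)
{
        if (grab_nic_access()) {
                fake_reg = val;
                release_nic_access();
        }
}

int main(void)
{
        wr_prph(0x1234);
        printf("reg = 0x%x\n", fake_reg);
        return 0;
}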
@@ -160,7 +161,7 @@ il_wr_prph(struct il_priv *il, u32 addr, u32 val)
unsigned long reg_flags;
spin_lock_irqsave(&il->reg_lock, reg_flags);
- if (!_il_grab_nic_access(il)) {
+ if (likely(_il_grab_nic_access(il))) {
_il_wr_prph(il, addr, val);
_il_release_nic_access(il);
}
@@ -178,7 +179,6 @@ il_read_targ_mem(struct il_priv *il, u32 addr)
_il_grab_nic_access(il);
_il_wr(il, HBUS_TARG_MEM_RADDR, addr);
- rmb();
value = _il_rd(il, HBUS_TARG_MEM_RDAT);
_il_release_nic_access(il);
@@ -193,9 +193,8 @@ il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
unsigned long reg_flags;
spin_lock_irqsave(&il->reg_lock, reg_flags);
- if (!_il_grab_nic_access(il)) {
+ if (likely(_il_grab_nic_access(il))) {
_il_wr(il, HBUS_TARG_MEM_WADDR, addr);
- wmb();
_il_wr(il, HBUS_TARG_MEM_WDAT, val);
_il_release_nic_access(il);
}
@@ -351,7 +350,7 @@ il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
}
}
- if (test_bit(S_RF_KILL_HW, &il->status)) {
+ if (test_bit(S_RFKILL, &il->status)) {
IL_ERR("Command %s aborted: RF KILL Switch\n",
il_get_cmd_string(cmd->id));
ret = -ECANCELED;
@@ -512,15 +511,15 @@ il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
}
D_LED("Led blink time compensation=%u\n",
- il->cfg->base_params->led_compensation);
+ il->cfg->led_compensation);
led_cmd.on =
il_blink_compensation(il, on,
- il->cfg->base_params->led_compensation);
+ il->cfg->led_compensation);
led_cmd.off =
il_blink_compensation(il, off,
- il->cfg->base_params->led_compensation);
+ il->cfg->led_compensation);
- ret = il->cfg->ops->led->cmd(il, &led_cmd);
+ ret = il->ops->send_led_cmd(il, &led_cmd);
if (!ret) {
il->blink_on = on;
il->blink_off = off;
@@ -691,7 +690,7 @@ il_eeprom_verify_signature(struct il_priv *il)
const u8 *
il_eeprom_query_addr(const struct il_priv *il, size_t offset)
{
- BUG_ON(offset >= il->cfg->base_params->eeprom_size);
+ BUG_ON(offset >= il->cfg->eeprom_size);
return &il->eeprom[offset];
}
EXPORT_SYMBOL(il_eeprom_query_addr);
@@ -722,7 +721,7 @@ il_eeprom_init(struct il_priv *il)
u16 addr;
/* allocate eeprom */
- sz = il->cfg->base_params->eeprom_size;
+ sz = il->cfg->eeprom_size;
D_EEPROM("NVM size = %d\n", sz);
il->eeprom = kzalloc(sz, GFP_KERNEL);
if (!il->eeprom) {
@@ -731,7 +730,7 @@ il_eeprom_init(struct il_priv *il)
}
e = (__le16 *) il->eeprom;
- il->cfg->ops->lib->apm_ops.init(il);
+ il->ops->apm_init(il);
ret = il_eeprom_verify_signature(il);
if (ret < 0) {
@@ -741,7 +740,7 @@ il_eeprom_init(struct il_priv *il)
}
/* Make sure driver (instead of uCode) is allowed to read EEPROM */
- ret = il->cfg->ops->lib->eeprom_ops.acquire_semaphore(il);
+ ret = il->ops->eeprom_acquire_semaphore(il);
if (ret < 0) {
IL_ERR("Failed to acquire EEPROM semaphore.\n");
ret = -ENOENT;
@@ -773,7 +772,7 @@ il_eeprom_init(struct il_priv *il)
ret = 0;
done:
- il->cfg->ops->lib->eeprom_ops.release_semaphore(il);
+ il->ops->eeprom_release_semaphore(il);
err:
if (ret)
@@ -799,8 +798,8 @@ il_init_band_reference(const struct il_priv *il, int eep_band,
const struct il_eeprom_channel **eeprom_ch_info,
const u8 **eeprom_ch_idx)
{
- u32 offset =
- il->cfg->ops->lib->eeprom_ops.regulatory_bands[eep_band - 1];
+ u32 offset = il->cfg->regulatory_bands[eep_band - 1];
+
switch (eep_band) {
case 1: /* 2.4GHz band */
*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
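
il_init_band_reference() now takes the per-band EEPROM offset directly from il->cfg->regulatory_bands[eep_band - 1], the seven-entry table that moved into il4965_cfg earlier in this patch. A tiny sketch of that 1-based band number to 0-based table lookup, with invented offsets in place of the EEPROM_REGULATORY_* constants:

#include <stdio.h>

#define NUM_REG_BANDS 7

/* Invented offsets; the real values are the EEPROM_REGULATORY_* macros. */
static const unsigned int regulatory_bands[NUM_REG_BANDS] = {
        0x108, 0x118, 0x134, 0x14e, 0x166, 0x178, 0x18a
};

/* Bands are numbered 1..7 by the callers, hence the "- 1". */
static unsigned int band_offset(int eep_band)
{
        if (eep_band < 1 || eep_band > NUM_REG_BANDS)
                return 0;
        return regulatory_bands[eep_band - 1];
}

int main(void)
{
        for (int band = 1; band <= NUM_REG_BANDS; band++)
                printf("band %d -> EEPROM offset 0x%x\n", band, band_offset(band));
        return 0;
}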
@@ -1001,10 +1000,8 @@ il_init_channel_map(struct il_priv *il)
}
/* Check if we do have HT40 channels */
- if (il->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
- EEPROM_REGULATORY_BAND_NO_HT40 &&
- il->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
- EEPROM_REGULATORY_BAND_NO_HT40)
+ if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 &&
+ il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40)
return 0;
/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
@@ -1158,9 +1155,9 @@ il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
clear_bit(S_POWER_PMI, &il->status);
- if (il->cfg->ops->lib->update_chain_flags && update_chains)
- il->cfg->ops->lib->update_chain_flags(il);
- else if (il->cfg->ops->lib->update_chain_flags)
+ if (il->ops->update_chain_flags && update_chains)
+ il->ops->update_chain_flags(il);
+ else if (il->ops->update_chain_flags)
D_POWER("Cannot update the power, chain noise "
"calibration running: %d\n",
il->chain_noise_data.state);
@@ -1442,7 +1439,6 @@ u16
il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
struct ieee80211_vif *vif)
{
- struct il_rxon_context *ctx = &il->ctx;
u16 value;
u16 passive =
@@ -1457,7 +1453,7 @@ il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
* dwell time to be 98% of the smallest beacon interval
* (minus 2 * channel tune time)
*/
- value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
+ value = il->vif ? il->vif->bss_conf.beacon_int : 0;
if (value > IL_PASSIVE_DWELL_BASE || !value)
value = IL_PASSIVE_DWELL_BASE;
value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
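
il_get_passive_dwell_time() derives the passive dwell from the associated interface's beacon interval: 98% of the interval minus two channel tune times, with a fixed base value when there is no usable interval. The hunk's arithmetic restated as a standalone helper (the constants are placeholders, and the per-band minimum the function applies afterwards lies outside this hunk):

#include <stdio.h>

#define DWELL_BASE        100   /* placeholder for IL_PASSIVE_DWELL_BASE */
#define CHANNEL_TUNE_TIME 5     /* placeholder for IL_CHANNEL_TUNE_TIME  */

/*
 * Passive dwell: 98% of the beacon interval minus two channel tune
 * times, falling back to a fixed base when no interval is available.
 */
static unsigned int passive_dwell(unsigned int beacon_int)
{
        unsigned int value = beacon_int;

        if (value > DWELL_BASE || !value)
                value = DWELL_BASE;
        value = (value * 98) / 100 - CHANNEL_TUNE_TIME * 2;

        return value;
}

int main(void)
{
        printf("beacon 0   -> dwell %u\n", passive_dwell(0));   /* 88 */
        printf("beacon 50  -> dwell %u\n", passive_dwell(50));  /* 39 */
        printf("beacon 200 -> dwell %u\n", passive_dwell(200)); /* 88 */
        return 0;
}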
@@ -1486,9 +1482,6 @@ il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
lockdep_assert_held(&il->mutex);
- if (WARN_ON(!il->cfg->ops->utils->request_scan))
- return -EOPNOTSUPP;
-
cancel_delayed_work(&il->scan_check);
if (!il_is_ready_rf(il)) {
@@ -1511,7 +1504,7 @@ il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
set_bit(S_SCANNING, &il->status);
il->scan_start = jiffies;
- ret = il->cfg->ops->utils->request_scan(il, vif);
+ ret = il->ops->request_scan(il, vif);
if (ret) {
clear_bit(S_SCANNING, &il->status);
return ret;
@@ -1530,12 +1523,13 @@ il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct il_priv *il = hw->priv;
int ret;
- D_MAC80211("enter\n");
-
- if (req->n_channels == 0)
+ if (req->n_channels == 0) {
+ IL_ERR("Can not scan on no channels.\n");
return -EINVAL;
+ }
mutex_lock(&il->mutex);
+ D_MAC80211("enter\n");
if (test_bit(S_SCANNING, &il->status)) {
D_SCAN("Scan already in progress.\n");
@@ -1550,9 +1544,8 @@ il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ret = il_scan_initiate(il, vif);
- D_MAC80211("leave\n");
-
out_unlock:
+ D_MAC80211("leave ret %d\n", ret);
mutex_unlock(&il->mutex);
return ret;
@@ -1673,7 +1666,7 @@ out_settings:
il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
il_set_tx_power(il, il->tx_power_next, false);
- il->cfg->ops->utils->post_scan(il);
+ il->ops->post_scan(il);
out:
mutex_unlock(&il->mutex);
@@ -1815,7 +1808,7 @@ il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
might_sleep();
}
- cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
+ cmd.len = il->ops->build_addsta_hcmd(sta, data);
ret = il_send_cmd(il, &cmd);
if (ret || (flags & CMD_ASYNC))
@@ -1832,8 +1825,7 @@ il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
EXPORT_SYMBOL(il_send_add_sta);
static void
-il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta,
- struct il_rxon_context *ctx)
+il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
{
struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
__le32 sta_flags;
@@ -1874,7 +1866,7 @@ il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta,
cpu_to_le32((u32) sta_ht_inf->
ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
- if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+ if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
sta_flags |= STA_FLG_HT40_EN_MSK;
else
sta_flags &= ~STA_FLG_HT40_EN_MSK;
@@ -1890,8 +1882,8 @@ done:
* should be called with sta_lock held
*/
u8
-il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
- const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
+il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta)
{
struct il_station_entry *station;
int i;
@@ -1899,9 +1891,9 @@ il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
u16 rate;
if (is_ap)
- sta_id = ctx->ap_sta_id;
+ sta_id = IL_AP_ID;
else if (is_broadcast_ether_addr(addr))
- sta_id = ctx->bcast_sta_id;
+ sta_id = il->hw_params.bcast_id;
else
for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
if (!compare_ether_addr
@@ -1950,22 +1942,14 @@ il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
memcpy(station->sta.sta.addr, addr, ETH_ALEN);
station->sta.mode = 0;
station->sta.sta.sta_id = sta_id;
- station->sta.station_flags = ctx->station_flags;
- station->ctxid = ctx->ctxid;
-
- if (sta) {
- struct il_station_priv_common *sta_priv;
-
- sta_priv = (void *)sta->drv_priv;
- sta_priv->ctx = ctx;
- }
+ station->sta.station_flags = 0;
/*
* OK to call unconditionally, since local stations (IBSS BSSID
* STA and broadcast STA) pass in a NULL sta, and mac80211
* doesn't allow HT IBSS.
*/
- il_set_ht_add_station(il, sta_id, sta, ctx);
+ il_set_ht_add_station(il, sta_id, sta);
/* 3945 only */
rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
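
With the single-context rework, il_prep_station() resolves the station slot directly: the AP maps to the fixed IL_AP_ID, broadcast frames map to il->hw_params.bcast_id, and any other address is matched against, or added to, the station table starting at IL_STA_ID. A compact sketch of that selection order with invented slot numbers and a plain memcmp() address match:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define AP_ID           0       /* invented; stands in for IL_AP_ID            */
#define STA_ID          1       /* invented; stands in for IL_STA_ID           */
#define MAX_STATIONS    8
#define BCAST_ID        (MAX_STATIONS - 1)      /* stands in for hw_params.bcast_id */
#define INVALID_STATION 0xff

static unsigned char table[MAX_STATIONS][6];    /* MAC per slot, all-zero = free */

static bool is_broadcast(const unsigned char *addr)
{
        static const unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        return memcmp(addr, bcast, 6) == 0;
}

/* Pick a station slot: AP, broadcast, an existing match, or the first free slot. */
static int prep_station(const unsigned char *addr, bool is_ap)
{
        static const unsigned char zero[6];
        int i;

        if (is_ap)
                return AP_ID;
        if (is_broadcast(addr))
                return BCAST_ID;

        for (i = STA_ID; i < MAX_STATIONS; i++)         /* already known? */
                if (memcmp(table[i], addr, 6) == 0)
                        return i;

        for (i = STA_ID; i < MAX_STATIONS; i++)         /* claim a free slot */
                if (memcmp(table[i], zero, 6) == 0) {
                        memcpy(table[i], addr, 6);
                        return i;
                }

        return INVALID_STATION;
}

int main(void)
{
        unsigned char peer[6]  = { 0x02, 0, 0, 0, 0, 0x01 };
        unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

        printf("ap    -> %d\n", prep_station(peer, true));
        printf("bcast -> %d\n", prep_station(bcast, false));
        printf("peer  -> %d\n", prep_station(peer, false));
        printf("again -> %d\n", prep_station(peer, false));
        return 0;
}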
@@ -1983,9 +1967,8 @@ EXPORT_SYMBOL_GPL(il_prep_station);
* il_add_station_common -
*/
int
-il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
- const u8 *addr, bool is_ap, struct ieee80211_sta *sta,
- u8 *sta_id_r)
+il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta, u8 *sta_id_r)
{
unsigned long flags_spin;
int ret = 0;
@@ -1994,7 +1977,7 @@ il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
*sta_id_r = 0;
spin_lock_irqsave(&il->sta_lock, flags_spin);
- sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
+ sta_id = il_prep_station(il, addr, is_ap, sta);
if (sta_id == IL_INVALID_STATION) {
IL_ERR("Unable to prepare station %pM for addition\n", addr);
spin_unlock_irqrestore(&il->sta_lock, flags_spin);
@@ -2181,7 +2164,7 @@ EXPORT_SYMBOL_GPL(il_remove_station);
* the ucode, e.g. unassociated RXON.
*/
void
-il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx)
+il_clear_ucode_stations(struct il_priv *il)
{
int i;
unsigned long flags_spin;
@@ -2191,9 +2174,6 @@ il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx)
spin_lock_irqsave(&il->sta_lock, flags_spin);
for (i = 0; i < il->hw_params.max_stations; i++) {
- if (ctx && ctx->ctxid != il->stations[i].ctxid)
- continue;
-
if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
D_INFO("Clearing ucode active for station %d\n", i);
il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
@@ -2216,7 +2196,7 @@ EXPORT_SYMBOL(il_clear_ucode_stations);
* Function sleeps.
*/
void
-il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
+il_restore_stations(struct il_priv *il)
{
struct il_addsta_cmd sta_cmd;
struct il_link_quality_cmd lq;
@@ -2234,8 +2214,6 @@ il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
D_ASSOC("Restoring all known stations ... start.\n");
spin_lock_irqsave(&il->sta_lock, flags_spin);
for (i = 0; i < il->hw_params.max_stations; i++) {
- if (ctx->ctxid != il->stations[i].ctxid)
- continue;
if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
!(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
D_ASSOC("Restoring sta %pM\n",
@@ -2273,7 +2251,7 @@ il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
* current LQ command
*/
if (send_lq)
- il_send_lq_cmd(il, ctx, &lq, CMD_SYNC, true);
+ il_send_lq_cmd(il, &lq, CMD_SYNC, true);
spin_lock_irqsave(&il->sta_lock, flags_spin);
il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
}
@@ -2353,15 +2331,14 @@ il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
* RXON flags are updated and when LQ command is updated.
*/
static bool
-il_is_lq_table_valid(struct il_priv *il, struct il_rxon_context *ctx,
- struct il_link_quality_cmd *lq)
+il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq)
{
int i;
- if (ctx->ht.enabled)
+ if (il->ht.enabled)
return true;
- D_INFO("Channel %u is not an HT channel\n", ctx->active.channel);
+ D_INFO("Channel %u is not an HT channel\n", il->active.channel);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
D_INFO("idx %d of LQ expects HT channel\n", i);
@@ -2382,8 +2359,8 @@ il_is_lq_table_valid(struct il_priv *il, struct il_rxon_context *ctx,
* progress.
*/
int
-il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
- struct il_link_quality_cmd *lq, u8 flags, bool init)
+il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq,
+ u8 flags, bool init)
{
int ret = 0;
unsigned long flags_spin;
@@ -2408,7 +2385,7 @@ il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
il_dump_lq_cmd(il, lq);
BUG_ON(init && (cmd.flags & CMD_ASYNC));
- if (il_is_lq_table_valid(il, ctx, lq))
+ if (il_is_lq_table_valid(il, lq))
ret = il_send_cmd(il, &cmd);
else
ret = -EINVAL;
@@ -2436,13 +2413,16 @@ il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
int ret;
- D_INFO("received request to remove station %pM\n", sta->addr);
mutex_lock(&il->mutex);
- D_INFO("proceeding to remove station %pM\n", sta->addr);
+ D_MAC80211("enter station %pM\n", sta->addr);
+
ret = il_remove_station(il, sta_common->sta_id, sta->addr);
if (ret)
IL_ERR("Error removing station %pM\n", sta->addr);
+
+ D_MAC80211("leave ret %d\n", ret);
mutex_unlock(&il->mutex);
+
return ret;
}
EXPORT_SYMBOL(il_mac_sta_remove);
@@ -2648,7 +2628,7 @@ il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
* All contexts have the same setting here due to it being
* a module parameter, so OK to check any context.
*/
- if (il->ctx.active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
+ if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
return 0;
if (!(fc & IEEE80211_FCTL_PROTECTED))
@@ -2739,7 +2719,7 @@ il_tx_queue_unmap(struct il_priv *il, int txq_id)
return;
while (q->write_ptr != q->read_ptr) {
- il->cfg->ops->lib->txq_free_tfd(il, txq);
+ il->ops->txq_free_tfd(il, txq);
q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
}
}
@@ -2772,8 +2752,8 @@ il_tx_queue_free(struct il_priv *il, int txq_id)
txq->tfds, txq->q.dma_addr);
/* De-alloc array of per-TFD driver data */
- kfree(txq->txb);
- txq->txb = NULL;
+ kfree(txq->skbs);
+ txq->skbs = NULL;
/* deallocate arrays */
kfree(txq->cmd);
@@ -2907,20 +2887,22 @@ EXPORT_SYMBOL(il_queue_space);
* il_queue_init - Initialize queue's high/low-water and read/write idxes
*/
static int
-il_queue_init(struct il_priv *il, struct il_queue *q, int count, int slots_num,
- u32 id)
+il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
{
- q->n_bd = count;
- q->n_win = slots_num;
- q->id = id;
+ /*
+ * TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ * il_queue_inc_wrap and il_queue_dec_wrap are broken.
+ */
+ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+ /* FIXME: remove q->n_bd */
+ q->n_bd = TFD_QUEUE_SIZE_MAX;
- /* count must be power-of-two size, otherwise il_queue_inc_wrap
- * and il_queue_dec_wrap are broken. */
- BUG_ON(!is_power_of_2(count));
+ q->n_win = slots;
+ q->id = id;
- /* slots_num must be power-of-two size, otherwise
+ /* slots must be power-of-two size, otherwise
* il_get_cmd_idx is broken. */
- BUG_ON(!is_power_of_2(slots_num));
+ BUG_ON(!is_power_of_2(slots));
q->low_mark = q->n_win / 4;
if (q->low_mark < 4)
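
il_queue_init() now fixes the ring size at TFD_QUEUE_SIZE_MAX and checks at build time that it is a power of two, since il_queue_inc_wrap() and il_queue_dec_wrap() rely on masking with size - 1; the low water mark visible above is a quarter of the usable window, floored at 4. A standalone sketch of such a power-of-two ring with watermarks (the high-mark rule mirrors the driver's pattern, but the exact numbers here are illustrative):

#include <assert.h>
#include <stdio.h>

#define QUEUE_SIZE 256                  /* must be a power of two */

struct ring {
        unsigned int read, write;       /* free-running indices */
        unsigned int low_mark, high_mark;
};

/* Wrap helpers only work because QUEUE_SIZE is a power of two. */
static unsigned int wrap_inc(unsigned int idx) { return (idx + 1) & (QUEUE_SIZE - 1); }
static unsigned int wrap_dec(unsigned int idx) { return (idx - 1) & (QUEUE_SIZE - 1); }

static void ring_init(struct ring *q, unsigned int slots)
{
        assert((QUEUE_SIZE & (QUEUE_SIZE - 1)) == 0);   /* BUILD_BUG_ON in the driver */

        q->read = q->write = 0;
        q->low_mark = slots / 4;
        if (q->low_mark < 4)
                q->low_mark = 4;
        q->high_mark = slots / 8;       /* illustrative; follows the same pattern */
        if (q->high_mark < 2)
                q->high_mark = 2;
}

int main(void)
{
        struct ring q;

        ring_init(&q, 64);
        printf("low=%u high=%u\n", q.low_mark, q.high_mark);
        printf("wrap: %u -> %u, %u -> %u\n", 255u, wrap_inc(255), 0u, wrap_dec(0));
        return 0;
}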
@@ -2947,23 +2929,21 @@ il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
/* Driver ilate data, only for Tx (not command) queues,
* not shared with device. */
if (id != il->cmd_queue) {
- txq->txb = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->txb[0]),
- GFP_KERNEL);
- if (!txq->txb) {
- IL_ERR("kmalloc for auxiliary BD "
- "structures failed\n");
+ txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(struct skb *),
+ GFP_KERNEL);
+ if (!txq->skbs) {
+ IL_ERR("Fail to alloc skbs\n");
goto error;
}
- } else {
- txq->txb = NULL;
- }
+ } else
+ txq->skbs = NULL;
/* Circular buffer of transmit frame descriptors (TFDs),
* shared with device */
txq->tfds =
dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
if (!txq->tfds) {
- IL_ERR("pci_alloc_consistent(%zd) failed\n", tfd_sz);
+ IL_ERR("Fail to alloc TFDs\n");
goto error;
}
txq->q.id = id;
@@ -2971,8 +2951,8 @@ il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
return 0;
error:
- kfree(txq->txb);
- txq->txb = NULL;
+ kfree(txq->skbs);
+ txq->skbs = NULL;
return -ENOMEM;
}
@@ -2981,12 +2961,11 @@ error:
* il_tx_queue_init - Allocate and initialize one tx/cmd queue
*/
int
-il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
- u32 txq_id)
+il_tx_queue_init(struct il_priv *il, u32 txq_id)
{
- int i, len;
- int ret;
- int actual_slots = slots_num;
+ int i, len, ret;
+ int slots, actual_slots;
+ struct il_tx_queue *txq = &il->txq[txq_id];
/*
* Alloc buffer array for commands (Tx or other types of commands).
@@ -2996,8 +2975,13 @@ il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
* For normal Tx queues (all other queues), no super-size command
* space is needed.
*/
- if (txq_id == il->cmd_queue)
- actual_slots++;
+ if (txq_id == il->cmd_queue) {
+ slots = TFD_CMD_SLOTS;
+ actual_slots = slots + 1;
+ } else {
+ slots = TFD_TX_CMD_SLOTS;
+ actual_slots = slots;
+ }
txq->meta =
kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
@@ -3010,7 +2994,7 @@ il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
len = sizeof(struct il_device_cmd);
for (i = 0; i < actual_slots; i++) {
/* only happens for cmd queue */
- if (i == slots_num)
+ if (i == slots)
len = IL_MAX_CMD_SIZE;
txq->cmd[i] = kmalloc(len, GFP_KERNEL);
@@ -3033,15 +3017,11 @@ il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
if (txq_id < 4)
il_set_swq_id(txq, txq_id, txq_id);
- /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
- * il_queue_inc_wrap and il_queue_dec_wrap are broken. */
- BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
-
/* Initialize queue's high/low-water marks, and head/tail idxes */
- il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+ il_queue_init(il, &txq->q, slots, txq_id);
/* Tell device where to find queue */
- il->cfg->ops->lib->txq_init(il, txq);
+ il->ops->txq_init(il, txq);
return 0;
err:
@@ -3056,23 +3036,27 @@ out_free_arrays:
EXPORT_SYMBOL(il_tx_queue_init);
void
-il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
- u32 txq_id)
+il_tx_queue_reset(struct il_priv *il, u32 txq_id)
{
- int actual_slots = slots_num;
+ int slots, actual_slots;
+ struct il_tx_queue *txq = &il->txq[txq_id];
- if (txq_id == il->cmd_queue)
- actual_slots++;
+ if (txq_id == il->cmd_queue) {
+ slots = TFD_CMD_SLOTS;
+ actual_slots = TFD_CMD_SLOTS + 1;
+ } else {
+ slots = TFD_TX_CMD_SLOTS;
+ actual_slots = TFD_TX_CMD_SLOTS;
+ }
memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
-
txq->need_update = 0;
/* Initialize queue's high/low-water marks, and head/tail idxes */
- il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+ il_queue_init(il, &txq->q, slots, txq_id);
/* Tell device where to find queue */
- il->cfg->ops->lib->txq_init(il, txq);
+ il->ops->txq_init(il, txq);
}
EXPORT_SYMBOL(il_tx_queue_reset);
@@ -3100,7 +3084,7 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
u32 idx;
u16 fix_size;
- cmd->len = il->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
+ cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len);
fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));
/* If any of the command structures end up being larger than
@@ -3179,9 +3163,9 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
#endif
txq->need_update = 1;
- if (il->cfg->ops->lib->txq_update_byte_cnt_tbl)
+ if (il->ops->txq_update_byte_cnt_tbl)
/* Set up entry in queue's byte count circular buffer */
- il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, 0);
+ il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
phys_addr =
pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
@@ -3189,8 +3173,8 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
dma_unmap_addr_set(out_meta, mapping, phys_addr);
dma_unmap_len_set(out_meta, len, fix_size);
- il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size,
- 1, U32_PAD(cmd->len));
+ il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
+ U32_PAD(cmd->len));
/* Increment and update queue's write idx */
q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -3332,30 +3316,6 @@ EXPORT_SYMBOL(il_debug_level);
const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(il_bcast_addr);
-/* This function both allocates and initializes hw and il. */
-struct ieee80211_hw *
-il_alloc_all(struct il_cfg *cfg)
-{
- struct il_priv *il;
- /* mac80211 allocates memory for this device instance, including
- * space for this driver's ilate structure */
- struct ieee80211_hw *hw;
-
- hw = ieee80211_alloc_hw(sizeof(struct il_priv),
- cfg->ops->ieee80211_ops);
- if (hw == NULL) {
- pr_err("%s: Can not allocate network device\n", cfg->name);
- goto out;
- }
-
- il = hw->priv;
- il->hw = hw;
-
-out:
- return hw;
-}
-EXPORT_SYMBOL(il_alloc_all);
-
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
static void
@@ -3562,10 +3522,9 @@ il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
}
bool
-il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
- struct ieee80211_sta_ht_cap *ht_cap)
+il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap)
{
- if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
+ if (!il->ht.enabled || !il->ht.is_40mhz)
return false;
/*
@@ -3581,8 +3540,8 @@ il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
#endif
return il_is_channel_extension(il, il->band,
- le16_to_cpu(ctx->staging.channel),
- ctx->ht.extension_chan_offset);
+ le16_to_cpu(il->staging.channel),
+ il->ht.extension_chan_offset);
}
EXPORT_SYMBOL(il_is_ht40_tx_allowed);
@@ -3621,22 +3580,22 @@ il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
}
int
-il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx)
+il_send_rxon_timing(struct il_priv *il)
{
u64 tsf;
s32 interval_tm, rem;
struct ieee80211_conf *conf = NULL;
u16 beacon_int;
- struct ieee80211_vif *vif = ctx->vif;
+ struct ieee80211_vif *vif = il->vif;
conf = &il->hw->conf;
lockdep_assert_held(&il->mutex);
- memset(&ctx->timing, 0, sizeof(struct il_rxon_time_cmd));
+ memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd));
- ctx->timing.timestamp = cpu_to_le64(il->timestamp);
- ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
+ il->timing.timestamp = cpu_to_le64(il->timestamp);
+ il->timing.listen_interval = cpu_to_le16(conf->listen_interval);
beacon_int = vif ? vif->bss_conf.beacon_int : 0;
@@ -3644,36 +3603,35 @@ il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx)
* TODO: For IBSS we need to get atim_win from mac80211,
* for now just always use 0
*/
- ctx->timing.atim_win = 0;
+ il->timing.atim_win = 0;
beacon_int =
il_adjust_beacon_interval(beacon_int,
il->hw_params.max_beacon_itrvl *
TIME_UNIT);
- ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
+ il->timing.beacon_interval = cpu_to_le16(beacon_int);
tsf = il->timestamp; /* tsf is modifed by do_div: copy it */
interval_tm = beacon_int * TIME_UNIT;
rem = do_div(tsf, interval_tm);
- ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
+ il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
- ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;
+ il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;
D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
- le16_to_cpu(ctx->timing.beacon_interval),
- le32_to_cpu(ctx->timing.beacon_init_val),
- le16_to_cpu(ctx->timing.atim_win));
+ le16_to_cpu(il->timing.beacon_interval),
+ le32_to_cpu(il->timing.beacon_init_val),
+ le16_to_cpu(il->timing.atim_win));
- return il_send_cmd_pdu(il, ctx->rxon_timing_cmd, sizeof(ctx->timing),
- &ctx->timing);
+ return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing),
+ &il->timing);
}
EXPORT_SYMBOL(il_send_rxon_timing);
void
-il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
- int hw_decrypt)
+il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt)
{
- struct il_rxon_cmd *rxon = &ctx->staging;
+ struct il_rxon_cmd *rxon = &il->staging;
if (hw_decrypt)
rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
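
il_send_rxon_timing() tells the firmware how long until the next beacon: it reduces the current TSF modulo the beacon interval in microseconds (beacon_int * TIME_UNIT) and sends interval minus remainder as beacon_init_val. The same arithmetic in plain C, assuming the usual 1024 microsecond time unit:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TIME_UNIT 1024ULL       /* one TU in microseconds (assumed) */

/* Microseconds from "now" (tsf) until the next beacon boundary. */
static uint32_t beacon_init_val(uint64_t tsf, uint16_t beacon_int)
{
        uint64_t interval_us = (uint64_t)beacon_int * TIME_UNIT;
        uint64_t rem = tsf % interval_us;       /* the driver uses do_div() here */

        return (uint32_t)(interval_us - rem);
}

int main(void)
{
        uint64_t tsf = 123456789ULL;    /* an arbitrary TSF value */
        uint16_t beacon_int = 100;      /* 100 TU beacon interval */

        printf("next beacon in %" PRIu32 " us\n", beacon_init_val(tsf, beacon_int));
        return 0;
}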
@@ -3685,9 +3643,9 @@ EXPORT_SYMBOL(il_set_rxon_hwcrypto);
/* validate RXON structure is valid */
int
-il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+il_check_rxon_cmd(struct il_priv *il)
{
- struct il_rxon_cmd *rxon = &ctx->staging;
+ struct il_rxon_cmd *rxon = &il->staging;
bool error = false;
if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
@@ -3765,10 +3723,10 @@ EXPORT_SYMBOL(il_check_rxon_cmd);
* a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
*/
int
-il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx)
+il_full_rxon_required(struct il_priv *il)
{
- const struct il_rxon_cmd *staging = &ctx->staging;
- const struct il_rxon_cmd *active = &ctx->active;
+ const struct il_rxon_cmd *staging = &il->staging;
+ const struct il_rxon_cmd *active = &il->active;
#define CHK(cond) \
if ((cond)) { \
@@ -3785,7 +3743,7 @@ il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx)
}
/* These items are only settable from the full RXON command */
- CHK(!il_is_associated_ctx(ctx));
+ CHK(!il_is_associated(il));
CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
CHK(compare_ether_addr(staging->node_addr, active->node_addr));
CHK(compare_ether_addr
@@ -3819,13 +3777,13 @@ il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx)
EXPORT_SYMBOL(il_full_rxon_required);
u8
-il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx)
+il_get_lowest_plcp(struct il_priv *il)
{
/*
* Assign the lowest rate -- should really get this from
* the beacon skb from mac80211.
*/
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
+ if (il->staging.flags & RXON_FLG_BAND_24G_MSK)
return RATE_1M_PLCP;
else
return RATE_6M_PLCP;
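
il_full_rxon_required() above decides between a full RXON tune and the lighter RXON_ASSOC update by comparing the staging and active copies field by field through the CHK/CHK_NEQ macros; any field that only a full RXON can change, such as the BSSID or the channel, forces the full command. A small sketch of that staging-versus-active comparison over an invented three-field config:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct cfg {
        unsigned char bssid[6];         /* full RXON only */
        unsigned short channel;         /* full RXON only */
        unsigned int assoc_flags;       /* can be updated with RXON_ASSOC */
};

/* true if any "full RXON only" field differs between staging and active. */
static bool full_rxon_required(const struct cfg *staging, const struct cfg *active)
{
        if (memcmp(staging->bssid, active->bssid, 6))
                return true;
        if (staging->channel != active->channel)
                return true;
        return false;
}

int main(void)
{
        struct cfg active  = { .channel = 6, .assoc_flags = 0x1 };
        struct cfg staging = active;

        staging.assoc_flags = 0x3;      /* association-only change */
        printf("assoc-only change -> full RXON? %d\n",
               full_rxon_required(&staging, &active));

        staging.channel = 11;           /* retune */
        printf("channel change    -> full RXON? %d\n",
               full_rxon_required(&staging, &active));
        return 0;
}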
@@ -3833,12 +3791,11 @@ il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx)
EXPORT_SYMBOL(il_get_lowest_plcp);
static void
-_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf,
- struct il_rxon_context *ctx)
+_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
{
- struct il_rxon_cmd *rxon = &ctx->staging;
+ struct il_rxon_cmd *rxon = &il->staging;
- if (!ctx->ht.enabled) {
+ if (!il->ht.enabled) {
rxon->flags &=
~(RXON_FLG_CHANNEL_MODE_MSK |
RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
@@ -3847,19 +3804,19 @@ _il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf,
}
rxon->flags |=
- cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
+ cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
/* Set up channel bandwidth:
* 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
/* clear the HT channel mode before set the mode */
rxon->flags &=
~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
- if (il_is_ht40_tx_allowed(il, ctx, NULL)) {
+ if (il_is_ht40_tx_allowed(il, NULL)) {
/* pure ht40 */
- if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+ if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
/* Note: control channel is opposite of extension channel */
- switch (ctx->ht.extension_chan_offset) {
+ switch (il->ht.extension_chan_offset) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
rxon->flags &=
~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
@@ -3870,7 +3827,7 @@ _il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf,
}
} else {
/* Note: control channel is opposite of extension channel */
- switch (ctx->ht.extension_chan_offset) {
+ switch (il->ht.extension_chan_offset) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
rxon->flags &=
~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
@@ -3891,18 +3848,18 @@ _il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf,
rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
}
- if (il->cfg->ops->hcmd->set_rxon_chain)
- il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+ if (il->ops->set_rxon_chain)
+ il->ops->set_rxon_chain(il);
D_ASSOC("rxon flags 0x%X operation mode :0x%X "
"extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
- ctx->ht.protection, ctx->ht.extension_chan_offset);
+ il->ht.protection, il->ht.extension_chan_offset);
}
void
il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
{
- _il_set_rxon_ht(il, ht_conf, &il->ctx);
+ _il_set_rxon_ht(il, ht_conf);
}
EXPORT_SYMBOL(il_set_rxon_ht);
@@ -3925,7 +3882,7 @@ il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
for (i = min; i < max; i++) {
channel = il->channel_info[i].channel;
- if (channel == le16_to_cpu(il->ctx.staging.channel))
+ if (channel == le16_to_cpu(il->staging.channel))
continue;
ch_info = il_get_channel_info(il, band, channel);
@@ -3945,20 +3902,19 @@ EXPORT_SYMBOL(il_get_single_channel_number);
* in the staging RXON flag structure based on the ch->band
*/
int
-il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
- struct il_rxon_context *ctx)
+il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
{
enum ieee80211_band band = ch->band;
u16 channel = ch->hw_value;
- if (le16_to_cpu(ctx->staging.channel) == channel && il->band == band)
+ if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
return 0;
- ctx->staging.channel = cpu_to_le16(channel);
+ il->staging.channel = cpu_to_le16(channel);
if (band == IEEE80211_BAND_5GHZ)
- ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
+ il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
else
- ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+ il->staging.flags |= RXON_FLG_BAND_24G_MSK;
il->band = band;
@@ -3969,24 +3925,24 @@ il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
EXPORT_SYMBOL(il_set_rxon_channel);
void
-il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
- enum ieee80211_band band, struct ieee80211_vif *vif)
+il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
+ struct ieee80211_vif *vif)
{
if (band == IEEE80211_BAND_5GHZ) {
- ctx->staging.flags &=
+ il->staging.flags &=
~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
RXON_FLG_CCK_MSK);
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
} else {
/* Copied from il_post_associate() */
if (vif && vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
- ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
- ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
+ il->staging.flags |= RXON_FLG_BAND_24G_MSK;
+ il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
+ il->staging.flags &= ~RXON_FLG_CCK_MSK;
}
}
EXPORT_SYMBOL(il_set_flags_for_band);
@@ -3995,69 +3951,60 @@ EXPORT_SYMBOL(il_set_flags_for_band);
* initialize rxon structure with default values from eeprom
*/
void
-il_connection_init_rx_config(struct il_priv *il, struct il_rxon_context *ctx)
+il_connection_init_rx_config(struct il_priv *il)
{
const struct il_channel_info *ch_info;
- memset(&ctx->staging, 0, sizeof(ctx->staging));
-
- if (!ctx->vif) {
- ctx->staging.dev_type = ctx->unused_devtype;
- } else
- switch (ctx->vif->type) {
-
- case NL80211_IFTYPE_STATION:
- ctx->staging.dev_type = ctx->station_devtype;
- ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
- break;
-
- case NL80211_IFTYPE_ADHOC:
- ctx->staging.dev_type = ctx->ibss_devtype;
- ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
- ctx->staging.filter_flags =
- RXON_FILTER_BCON_AWARE_MSK |
- RXON_FILTER_ACCEPT_GRP_MSK;
- break;
-
- default:
- IL_ERR("Unsupported interface type %d\n",
- ctx->vif->type);
- break;
- }
+ memset(&il->staging, 0, sizeof(il->staging));
+
+ if (!il->vif) {
+ il->staging.dev_type = RXON_DEV_TYPE_ESS;
+ } else if (il->vif->type == NL80211_IFTYPE_STATION) {
+ il->staging.dev_type = RXON_DEV_TYPE_ESS;
+ il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+ } else if (il->vif->type == NL80211_IFTYPE_ADHOC) {
+ il->staging.dev_type = RXON_DEV_TYPE_IBSS;
+ il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+ il->staging.filter_flags =
+ RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
+ } else {
+ IL_ERR("Unsupported interface type %d\n", il->vif->type);
+ return;
+ }
#if 0
/* TODO: Figure out when short_preamble would be set and cache from
* that */
if (!hw_to_local(il->hw)->short_preamble)
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
else
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif
ch_info =
- il_get_channel_info(il, il->band, le16_to_cpu(ctx->active.channel));
+ il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel));
if (!ch_info)
ch_info = &il->channel_info[0];
- ctx->staging.channel = cpu_to_le16(ch_info->channel);
+ il->staging.channel = cpu_to_le16(ch_info->channel);
il->band = ch_info->band;
- il_set_flags_for_band(il, ctx, il->band, ctx->vif);
+ il_set_flags_for_band(il, il->band, il->vif);
- ctx->staging.ofdm_basic_rates =
+ il->staging.ofdm_basic_rates =
(IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
- ctx->staging.cck_basic_rates =
+ il->staging.cck_basic_rates =
(IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
/* clear both MIX and PURE40 mode flag */
- ctx->staging.flags &=
+ il->staging.flags &=
~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
- if (ctx->vif)
- memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
+ if (il->vif)
+ memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN);
- ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
- ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
+ il->staging.ofdm_ht_single_stream_basic_rates = 0xff;
+ il->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(il_connection_init_rx_config);
@@ -4084,10 +4031,10 @@ il_set_rate(struct il_priv *il)
D_RATE("Set active_rate = %0x\n", il->active_rate);
- il->ctx.staging.cck_basic_rates =
+ il->staging.cck_basic_rates =
(IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
- il->ctx.staging.ofdm_basic_rates =
+ il->staging.ofdm_basic_rates =
(IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
}
EXPORT_SYMBOL(il_set_rate);
@@ -4095,13 +4042,11 @@ EXPORT_SYMBOL(il_set_rate);
void
il_chswitch_done(struct il_priv *il, bool is_success)
{
- struct il_rxon_context *ctx = &il->ctx;
-
if (test_bit(S_EXIT_PENDING, &il->status))
return;
if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
- ieee80211_chswitch_done(ctx->vif, is_success);
+ ieee80211_chswitch_done(il->vif, is_success);
}
EXPORT_SYMBOL(il_chswitch_done);
@@ -4110,16 +4055,14 @@ il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
{
struct il_rx_pkt *pkt = rxb_addr(rxb);
struct il_csa_notification *csa = &(pkt->u.csa_notif);
-
- struct il_rxon_context *ctx = &il->ctx;
- struct il_rxon_cmd *rxon = (void *)&ctx->active;
+ struct il_rxon_cmd *rxon = (void *)&il->active;
if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
return;
if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
rxon->channel = csa->channel;
- ctx->staging.channel = csa->channel;
+ il->staging.channel = csa->channel;
D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
il_chswitch_done(il, true);
} else {
@@ -4132,9 +4075,9 @@ EXPORT_SYMBOL(il_hdl_csa);
#ifdef CONFIG_IWLEGACY_DEBUG
void
-il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+il_print_rx_config_cmd(struct il_priv *il)
{
- struct il_rxon_cmd *rxon = &ctx->staging;
+ struct il_rxon_cmd *rxon = &il->staging;
D_RADIO("RX CONFIG:\n");
il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
@@ -4164,12 +4107,12 @@ il_irq_handle_error(struct il_priv *il)
IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);
- il->cfg->ops->lib->dump_nic_error_log(il);
- if (il->cfg->ops->lib->dump_fh)
- il->cfg->ops->lib->dump_fh(il, NULL, false);
+ il->ops->dump_nic_error_log(il);
+ if (il->ops->dump_fh)
+ il->ops->dump_fh(il, NULL, false);
#ifdef CONFIG_IWLEGACY_DEBUG
if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
- il_print_rx_config_cmd(il, &il->ctx);
+ il_print_rx_config_cmd(il);
#endif
wake_up(&il->wait_command_queue);
@@ -4189,17 +4132,17 @@ il_irq_handle_error(struct il_priv *il)
EXPORT_SYMBOL(il_irq_handle_error);
static int
-il_apm_stop_master(struct il_priv *il)
+_il_apm_stop_master(struct il_priv *il)
{
int ret = 0;
/* stop device's busmaster DMA activity */
- il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+ _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
ret =
_il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
- if (ret)
+ if (ret < 0)
IL_WARN("Master Disable Timed Out, 100 usec\n");
D_INFO("stop master\n");
@@ -4208,15 +4151,17 @@ il_apm_stop_master(struct il_priv *il)
}
void
-il_apm_stop(struct il_priv *il)
+_il_apm_stop(struct il_priv *il)
{
+ lockdep_assert_held(&il->reg_lock);
+
D_INFO("Stop card, put in low power state\n");
/* Stop device's DMA activity */
- il_apm_stop_master(il);
+ _il_apm_stop_master(il);
/* Reset the entire device */
- il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
udelay(10);
@@ -4224,7 +4169,18 @@ il_apm_stop(struct il_priv *il)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+EXPORT_SYMBOL(_il_apm_stop);
+
+void
+il_apm_stop(struct il_priv *il)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->reg_lock, flags);
+ _il_apm_stop(il);
+ spin_unlock_irqrestore(&il->reg_lock, flags);
}
EXPORT_SYMBOL(il_apm_stop);
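/*
 * A minimal usage sketch, assuming a hypothetical caller that already holds
 * reg_lock for other register work; such a caller uses the bare helper,
 * while everything else keeps the locking wrapper il_apm_stop().
 */
static void
il_example_power_down(struct il_priv *il)
{
	unsigned long flags;

	spin_lock_irqsave(&il->reg_lock, flags);
	/* ...other CSR accesses under reg_lock... */
	_il_apm_stop(il);	/* lockdep_assert_held(&il->reg_lock) is satisfied */
	spin_unlock_irqrestore(&il->reg_lock, flags);
}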
@@ -4276,7 +4232,7 @@ il_apm_init(struct il_priv *il)
* If not (unlikely), enable L0S, so there is at least some
* power savings, even without L1.
*/
- if (il->cfg->base_params->set_l0s) {
+ if (il->cfg->set_l0s) {
lctl = il_pcie_link_ctl(il);
if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
PCI_CFG_LINK_CTRL_VAL_L1_EN) {
@@ -4293,9 +4249,9 @@ il_apm_init(struct il_priv *il)
}
/* Configure analog phase-lock-loop before activating to D0A */
- if (il->cfg->base_params->pll_cfg_val)
+ if (il->cfg->pll_cfg_val)
il_set_bit(il, CSR_ANA_PLL_CFG,
- il->cfg->base_params->pll_cfg_val);
+ il->cfg->pll_cfg_val);
/*
* Set "initialization complete" bit to move adapter from
@@ -4325,7 +4281,7 @@ il_apm_init(struct il_priv *il)
* do not disable clocks. This preserves any hardware bits already
* set by default in "CLK_CTRL_REG" after reset.
*/
- if (il->cfg->base_params->use_bsm)
+ if (il->cfg->use_bsm)
il_wr_prph(il, APMG_CLK_EN_REG,
APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
else
@@ -4347,14 +4303,13 @@ il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
int ret;
s8 prev_tx_power;
bool defer;
- struct il_rxon_context *ctx = &il->ctx;
lockdep_assert_held(&il->mutex);
if (il->tx_power_user_lmt == tx_power && !force)
return 0;
- if (!il->cfg->ops->lib->send_tx_power)
+ if (!il->ops->send_tx_power)
return -EOPNOTSUPP;
 /* 0 dBm means 1 milliwatt */
@@ -4378,7 +4333,7 @@ il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
/* do not set tx power when scanning or channel changing */
defer = test_bit(S_SCANNING, &il->status) ||
- memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+ memcmp(&il->active, &il->staging, sizeof(il->staging));
if (defer && !force) {
D_INFO("Deferring tx power set\n");
return 0;
@@ -4387,7 +4342,7 @@ il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
prev_tx_power = il->tx_power_user_lmt;
il->tx_power_user_lmt = tx_power;
- ret = il->cfg->ops->lib->send_tx_power(il);
+ ret = il->ops->send_tx_power(il);
 /* if we fail to set tx_power, restore the original tx power */
if (ret) {
@@ -4505,15 +4460,15 @@ il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
spin_lock_irqsave(&il->lock, flags);
- il->ctx.qos_data.def_qos_parm.ac[q].cw_min =
+ il->qos_data.def_qos_parm.ac[q].cw_min =
cpu_to_le16(params->cw_min);
- il->ctx.qos_data.def_qos_parm.ac[q].cw_max =
+ il->qos_data.def_qos_parm.ac[q].cw_max =
cpu_to_le16(params->cw_max);
- il->ctx.qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
- il->ctx.qos_data.def_qos_parm.ac[q].edca_txop =
+ il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+ il->qos_data.def_qos_parm.ac[q].edca_txop =
cpu_to_le16((params->txop * 32));
- il->ctx.qos_data.def_qos_parm.ac[q].reserved1 = 0;
+ il->qos_data.def_qos_parm.ac[q].reserved1 = 0;
spin_unlock_irqrestore(&il->lock, flags);
@@ -4526,60 +4481,36 @@ int
il_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
struct il_priv *il = hw->priv;
+ int ret;
- return il->ibss_manager == IL_IBSS_MANAGER;
-}
-EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
-
-static int
-il_set_mode(struct il_priv *il, struct il_rxon_context *ctx)
-{
- il_connection_init_rx_config(il, ctx);
+ D_MAC80211("enter\n");
- if (il->cfg->ops->hcmd->set_rxon_chain)
- il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+ ret = (il->ibss_manager == IL_IBSS_MANAGER);
- return il_commit_rxon(il, ctx);
+ D_MAC80211("leave ret %d\n", ret);
+ return ret;
}
+EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
static int
-il_setup_interface(struct il_priv *il, struct il_rxon_context *ctx)
+il_set_mode(struct il_priv *il)
{
- struct ieee80211_vif *vif = ctx->vif;
- int err;
+ il_connection_init_rx_config(il);
- lockdep_assert_held(&il->mutex);
+ if (il->ops->set_rxon_chain)
+ il->ops->set_rxon_chain(il);
- /*
- * This variable will be correct only when there's just
- * a single context, but all code using it is for hardware
- * that supports only one context.
- */
- il->iw_mode = vif->type;
-
- ctx->is_active = true;
-
- err = il_set_mode(il, ctx);
- if (err) {
- if (!ctx->always_active)
- ctx->is_active = false;
- return err;
- }
-
- return 0;
+ return il_commit_rxon(il);
}
int
il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct il_priv *il = hw->priv;
- struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
int err;
- u32 modes;
-
- D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
mutex_lock(&il->mutex);
+ D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
if (!il_is_ready_rf(il)) {
IL_WARN("Try to add interface when device not ready\n");
@@ -4587,32 +4518,24 @@ il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
goto out;
}
- /* check if busy context is exclusive */
- if (il->ctx.vif &&
- (il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type))) {
- err = -EINVAL;
- goto out;
- }
-
- modes = il->ctx.interface_modes | il->ctx.exclusive_interface_modes;
- if (!(modes & BIT(vif->type))) {
+ if (il->vif) {
err = -EOPNOTSUPP;
goto out;
}
- vif_priv->ctx = &il->ctx;
- il->ctx.vif = vif;
+ il->vif = vif;
+ il->iw_mode = vif->type;
- err = il_setup_interface(il, &il->ctx);
+ err = il_set_mode(il);
if (err) {
- il->ctx.vif = NULL;
+ il->vif = NULL;
il->iw_mode = NL80211_IFTYPE_STATION;
}
out:
+ D_MAC80211("leave err %d\n", err);
mutex_unlock(&il->mutex);
- D_MAC80211("leave\n");
return err;
}
EXPORT_SYMBOL(il_mac_add_interface);
@@ -4621,8 +4544,6 @@ static void
il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
bool mode_change)
{
- struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
-
lockdep_assert_held(&il->mutex);
if (il->scan_vif == vif) {
@@ -4630,33 +4551,27 @@ il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
il_force_scan_end(il);
}
- if (!mode_change) {
- il_set_mode(il, ctx);
- if (!ctx->always_active)
- ctx->is_active = false;
- }
+ if (!mode_change)
+ il_set_mode(il);
+
}
void
il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct il_priv *il = hw->priv;
- struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
-
- D_MAC80211("enter\n");
mutex_lock(&il->mutex);
+ D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
- WARN_ON(ctx->vif != vif);
- ctx->vif = NULL;
+ WARN_ON(il->vif != vif);
+ il->vif = NULL;
il_teardown_interface(il, vif, false);
-
memset(il->bssid, 0, ETH_ALEN);
- mutex_unlock(&il->mutex);
D_MAC80211("leave\n");
-
+ mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_remove_interface);
@@ -4666,7 +4581,7 @@ il_alloc_txq_mem(struct il_priv *il)
if (!il->txq)
il->txq =
kzalloc(sizeof(struct il_tx_queue) *
- il->cfg->base_params->num_of_queues, GFP_KERNEL);
+ il->cfg->num_of_queues, GFP_KERNEL);
if (!il->txq) {
IL_ERR("Not enough memory for txq\n");
return -ENOMEM;
@@ -4676,259 +4591,12 @@ il_alloc_txq_mem(struct il_priv *il)
EXPORT_SYMBOL(il_alloc_txq_mem);
void
-il_txq_mem(struct il_priv *il)
+il_free_txq_mem(struct il_priv *il)
{
kfree(il->txq);
il->txq = NULL;
}
-EXPORT_SYMBOL(il_txq_mem);
-
-#ifdef CONFIG_IWLEGACY_DEBUGFS
-
-#define IL_TRAFFIC_DUMP_SIZE (IL_TRAFFIC_ENTRY_SIZE * IL_TRAFFIC_ENTRIES)
-
-void
-il_reset_traffic_log(struct il_priv *il)
-{
- il->tx_traffic_idx = 0;
- il->rx_traffic_idx = 0;
- if (il->tx_traffic)
- memset(il->tx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
- if (il->rx_traffic)
- memset(il->rx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
-}
-
-int
-il_alloc_traffic_mem(struct il_priv *il)
-{
- u32 traffic_size = IL_TRAFFIC_DUMP_SIZE;
-
- if (il_debug_level & IL_DL_TX) {
- if (!il->tx_traffic) {
- il->tx_traffic = kzalloc(traffic_size, GFP_KERNEL);
- if (!il->tx_traffic)
- return -ENOMEM;
- }
- }
- if (il_debug_level & IL_DL_RX) {
- if (!il->rx_traffic) {
- il->rx_traffic = kzalloc(traffic_size, GFP_KERNEL);
- if (!il->rx_traffic)
- return -ENOMEM;
- }
- }
- il_reset_traffic_log(il);
- return 0;
-}
-EXPORT_SYMBOL(il_alloc_traffic_mem);
-
-void
-il_free_traffic_mem(struct il_priv *il)
-{
- kfree(il->tx_traffic);
- il->tx_traffic = NULL;
-
- kfree(il->rx_traffic);
- il->rx_traffic = NULL;
-}
-EXPORT_SYMBOL(il_free_traffic_mem);
-
-void
-il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
- struct ieee80211_hdr *header)
-{
- __le16 fc;
- u16 len;
-
- if (likely(!(il_debug_level & IL_DL_TX)))
- return;
-
- if (!il->tx_traffic)
- return;
-
- fc = header->frame_control;
- if (ieee80211_is_data(fc)) {
- len =
- (length >
- IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
- memcpy((il->tx_traffic +
- (il->tx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
- len);
- il->tx_traffic_idx =
- (il->tx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
- }
-}
-EXPORT_SYMBOL(il_dbg_log_tx_data_frame);
-
-void
-il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
- struct ieee80211_hdr *header)
-{
- __le16 fc;
- u16 len;
-
- if (likely(!(il_debug_level & IL_DL_RX)))
- return;
-
- if (!il->rx_traffic)
- return;
-
- fc = header->frame_control;
- if (ieee80211_is_data(fc)) {
- len =
- (length >
- IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
- memcpy((il->rx_traffic +
- (il->rx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
- len);
- il->rx_traffic_idx =
- (il->rx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
- }
-}
-EXPORT_SYMBOL(il_dbg_log_rx_data_frame);
-
-const char *
-il_get_mgmt_string(int cmd)
-{
- switch (cmd) {
- IL_CMD(MANAGEMENT_ASSOC_REQ);
- IL_CMD(MANAGEMENT_ASSOC_RESP);
- IL_CMD(MANAGEMENT_REASSOC_REQ);
- IL_CMD(MANAGEMENT_REASSOC_RESP);
- IL_CMD(MANAGEMENT_PROBE_REQ);
- IL_CMD(MANAGEMENT_PROBE_RESP);
- IL_CMD(MANAGEMENT_BEACON);
- IL_CMD(MANAGEMENT_ATIM);
- IL_CMD(MANAGEMENT_DISASSOC);
- IL_CMD(MANAGEMENT_AUTH);
- IL_CMD(MANAGEMENT_DEAUTH);
- IL_CMD(MANAGEMENT_ACTION);
- default:
- return "UNKNOWN";
-
- }
-}
-
-const char *
-il_get_ctrl_string(int cmd)
-{
- switch (cmd) {
- IL_CMD(CONTROL_BACK_REQ);
- IL_CMD(CONTROL_BACK);
- IL_CMD(CONTROL_PSPOLL);
- IL_CMD(CONTROL_RTS);
- IL_CMD(CONTROL_CTS);
- IL_CMD(CONTROL_ACK);
- IL_CMD(CONTROL_CFEND);
- IL_CMD(CONTROL_CFENDACK);
- default:
- return "UNKNOWN";
-
- }
-}
-
-void
-il_clear_traffic_stats(struct il_priv *il)
-{
- memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
- memset(&il->rx_stats, 0, sizeof(struct traffic_stats));
-}
-
-/*
- * if CONFIG_IWLEGACY_DEBUGFS defined,
- * il_update_stats function will
- * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass
- * Use debugFs to display the rx/rx_stats
- * if CONFIG_IWLEGACY_DEBUGFS not being defined, then no MGMT and CTRL
- * information will be recorded, but DATA pkt still will be recorded
- * for the reason of il_led.c need to control the led blinking based on
- * number of tx and rx data.
- *
- */
-void
-il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
-{
- struct traffic_stats *stats;
-
- if (is_tx)
- stats = &il->tx_stats;
- else
- stats = &il->rx_stats;
-
- if (ieee80211_is_mgmt(fc)) {
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
- stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
- stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
- stats->mgmt[MANAGEMENT_PROBE_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
- stats->mgmt[MANAGEMENT_PROBE_RESP]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_BEACON):
- stats->mgmt[MANAGEMENT_BEACON]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ATIM):
- stats->mgmt[MANAGEMENT_ATIM]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
- stats->mgmt[MANAGEMENT_DISASSOC]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- stats->mgmt[MANAGEMENT_AUTH]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- stats->mgmt[MANAGEMENT_DEAUTH]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ACTION):
- stats->mgmt[MANAGEMENT_ACTION]++;
- break;
- }
- } else if (ieee80211_is_ctl(fc)) {
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
- stats->ctrl[CONTROL_BACK_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_BACK):
- stats->ctrl[CONTROL_BACK]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
- stats->ctrl[CONTROL_PSPOLL]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_RTS):
- stats->ctrl[CONTROL_RTS]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_CTS):
- stats->ctrl[CONTROL_CTS]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ACK):
- stats->ctrl[CONTROL_ACK]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_CFEND):
- stats->ctrl[CONTROL_CFEND]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
- stats->ctrl[CONTROL_CFENDACK]++;
- break;
- }
- } else {
- /* data */
- stats->data_cnt++;
- stats->data_bytes += len;
- }
-}
-EXPORT_SYMBOL(il_update_stats);
-#endif
+EXPORT_SYMBOL(il_free_txq_mem);
int
il_force_reset(struct il_priv *il, bool external)
@@ -4987,15 +4655,18 @@ il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum nl80211_iftype newtype, bool newp2p)
{
struct il_priv *il = hw->priv;
- struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
- u32 modes;
int err;
- newtype = ieee80211_iftype_p2p(newtype, newp2p);
-
mutex_lock(&il->mutex);
+ D_MAC80211("enter: type %d, addr %pM newtype %d newp2p %d\n",
+ vif->type, vif->addr, newtype, newp2p);
- if (!ctx->vif || !il_is_ready_rf(il)) {
+ if (newp2p) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!il->vif || !il_is_ready_rf(il)) {
/*
* Huh? But wait ... this can maybe happen when
* we're in the middle of a firmware restart!
@@ -5004,23 +4675,11 @@ il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto out;
}
- modes = ctx->interface_modes | ctx->exclusive_interface_modes;
- if (!(modes & BIT(newtype))) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- if ((il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type)) ||
- (il->ctx.exclusive_interface_modes & BIT(newtype))) {
- err = -EINVAL;
- goto out;
- }
-
/* success */
il_teardown_interface(il, vif, true);
vif->type = newtype;
- vif->p2p = newp2p;
- err = il_setup_interface(il, ctx);
+ vif->p2p = false;
+ err = il_set_mode(il);
WARN_ON(err);
/*
* We've switched internally, but submitting to the
@@ -5032,7 +4691,9 @@ il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
err = 0;
out:
+ D_MAC80211("leave err %d\n", err);
mutex_unlock(&il->mutex);
+
return err;
}
EXPORT_SYMBOL(il_mac_change_interface);
@@ -5056,11 +4717,11 @@ il_check_stuck_queue(struct il_priv *il, int cnt)
timeout =
txq->time_stamp +
- msecs_to_jiffies(il->cfg->base_params->wd_timeout);
+ msecs_to_jiffies(il->cfg->wd_timeout);
if (time_after(jiffies, timeout)) {
IL_ERR("Queue %d stuck for %u ms.\n", q->id,
- il->cfg->base_params->wd_timeout);
+ il->cfg->wd_timeout);
ret = il_force_reset(il, false);
return (ret == -EAGAIN) ? 0 : 1;
}
@@ -5088,7 +4749,7 @@ il_bg_watchdog(unsigned long data)
if (test_bit(S_EXIT_PENDING, &il->status))
return;
- timeout = il->cfg->base_params->wd_timeout;
+ timeout = il->cfg->wd_timeout;
if (timeout == 0)
return;
@@ -5115,7 +4776,7 @@ EXPORT_SYMBOL(il_bg_watchdog);
void
il_setup_watchdog(struct il_priv *il)
{
- unsigned int timeout = il->cfg->base_params->wd_timeout;
+ unsigned int timeout = il->cfg->wd_timeout;
if (timeout)
mod_timer(&il->watchdog,
@@ -5229,9 +4890,9 @@ il_pci_resume(struct device *device)
hw_rfkill = true;
if (hw_rfkill)
- set_bit(S_RF_KILL_HW, &il->status);
+ set_bit(S_RFKILL, &il->status);
else
- clear_bit(S_RF_KILL_HW, &il->status);
+ clear_bit(S_RFKILL, &il->status);
wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
@@ -5252,28 +4913,25 @@ EXPORT_SYMBOL(il_pm_ops);
#endif /* CONFIG_PM */
static void
-il_update_qos(struct il_priv *il, struct il_rxon_context *ctx)
+il_update_qos(struct il_priv *il)
{
if (test_bit(S_EXIT_PENDING, &il->status))
return;
- if (!ctx->is_active)
- return;
-
- ctx->qos_data.def_qos_parm.qos_flags = 0;
+ il->qos_data.def_qos_parm.qos_flags = 0;
- if (ctx->qos_data.qos_active)
- ctx->qos_data.def_qos_parm.qos_flags |=
+ if (il->qos_data.qos_active)
+ il->qos_data.def_qos_parm.qos_flags |=
QOS_PARAM_FLG_UPDATE_EDCA_MSK;
- if (ctx->ht.enabled)
- ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+ if (il->ht.enabled)
+ il->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
- ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags);
+ il->qos_data.qos_active, il->qos_data.def_qos_parm.qos_flags);
- il_send_cmd_pdu_async(il, ctx->qos_cmd, sizeof(struct il_qosparam_cmd),
- &ctx->qos_data.def_qos_parm, NULL);
+ il_send_cmd_pdu_async(il, C_QOS_PARAM, sizeof(struct il_qosparam_cmd),
+ &il->qos_data.def_qos_parm, NULL);
}
/**
@@ -5287,19 +4945,14 @@ il_mac_config(struct ieee80211_hw *hw, u32 changed)
struct ieee80211_conf *conf = &hw->conf;
struct ieee80211_channel *channel = conf->channel;
struct il_ht_config *ht_conf = &il->current_ht_config;
- struct il_rxon_context *ctx = &il->ctx;
unsigned long flags = 0;
int ret = 0;
u16 ch;
int scan_active = 0;
bool ht_changed = false;
- if (WARN_ON(!il->cfg->ops->legacy))
- return -EOPNOTSUPP;
-
mutex_lock(&il->mutex);
-
- D_MAC80211("enter to channel %d changed 0x%X\n", channel->hw_value,
+ D_MAC80211("enter: channel %d changed 0x%X\n", channel->hw_value,
changed);
if (unlikely(test_bit(S_SCANNING, &il->status))) {
@@ -5319,8 +4972,8 @@ il_mac_config(struct ieee80211_hw *hw, u32 changed)
* set up the SM PS mode to OFF if an HT channel is
* configured.
*/
- if (il->cfg->ops->hcmd->set_rxon_chain)
- il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+ if (il->ops->set_rxon_chain)
+ il->ops->set_rxon_chain(il);
}
/* during scanning mac80211 will delay channel setting until
@@ -5349,48 +5002,48 @@ il_mac_config(struct ieee80211_hw *hw, u32 changed)
spin_lock_irqsave(&il->lock, flags);
/* Configure HT40 channels */
- if (ctx->ht.enabled != conf_is_ht(conf)) {
- ctx->ht.enabled = conf_is_ht(conf);
+ if (il->ht.enabled != conf_is_ht(conf)) {
+ il->ht.enabled = conf_is_ht(conf);
ht_changed = true;
}
- if (ctx->ht.enabled) {
+ if (il->ht.enabled) {
if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
+ il->ht.extension_chan_offset =
IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
+ il->ht.is_40mhz = true;
} else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
+ il->ht.extension_chan_offset =
IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
+ il->ht.is_40mhz = true;
} else {
- ctx->ht.extension_chan_offset =
+ il->ht.extension_chan_offset =
IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
+ il->ht.is_40mhz = false;
}
} else
- ctx->ht.is_40mhz = false;
+ il->ht.is_40mhz = false;
/*
* Default to no protection. Protection mode will
* later be set from BSS config in il_ht_conf
*/
- ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+ il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
/* if we are switching from ht to 2.4 clear flags
* from any ht related info since 2.4 does not
* support ht */
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
+ if ((le16_to_cpu(il->staging.channel) != ch))
+ il->staging.flags = 0;
- il_set_rxon_channel(il, channel, ctx);
+ il_set_rxon_channel(il, channel);
il_set_rxon_ht(il, ht_conf);
- il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
+ il_set_flags_for_band(il, channel->band, il->vif);
spin_unlock_irqrestore(&il->lock, flags);
- if (il->cfg->ops->legacy->update_bcast_stations)
- ret = il->cfg->ops->legacy->update_bcast_stations(il);
+ if (il->ops->update_bcast_stations)
+ ret = il->ops->update_bcast_stations(il);
set_ch_out:
/* The list of supported rates and rate mask can be different
@@ -5420,16 +5073,17 @@ set_ch_out:
if (scan_active)
goto out;
- if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
- il_commit_rxon(il, ctx);
+ if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
+ il_commit_rxon(il);
else
D_INFO("Not re-sending same RXON configuration.\n");
if (ht_changed)
- il_update_qos(il, ctx);
+ il_update_qos(il);
out:
- D_MAC80211("leave\n");
+ D_MAC80211("leave ret %d\n", ret);
mutex_unlock(&il->mutex);
+
return ret;
}
EXPORT_SYMBOL(il_mac_config);
@@ -5439,26 +5093,18 @@ il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct il_priv *il = hw->priv;
unsigned long flags;
- struct il_rxon_context *ctx = &il->ctx;
-
- if (WARN_ON(!il->cfg->ops->legacy))
- return;
mutex_lock(&il->mutex);
- D_MAC80211("enter\n");
+ D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
spin_lock_irqsave(&il->lock, flags);
- memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
- spin_unlock_irqrestore(&il->lock, flags);
- spin_lock_irqsave(&il->lock, flags);
+ memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
/* new association get rid of ibss beacon skb */
if (il->beacon_skb)
dev_kfree_skb(il->beacon_skb);
-
il->beacon_skb = NULL;
-
il->timestamp = 0;
spin_unlock_irqrestore(&il->lock, flags);
@@ -5470,17 +5116,14 @@ il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return;
}
- /* we are restarting association process
- * clear RXON_FILTER_ASSOC_MSK bit
- */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- il_commit_rxon(il, ctx);
+ /* we are restarting association process */
+ il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il);
il_set_rate(il);
- mutex_unlock(&il->mutex);
-
D_MAC80211("leave\n");
+ mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_reset_tsf);
@@ -5490,16 +5133,15 @@ il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
struct il_ht_config *ht_conf = &il->current_ht_config;
struct ieee80211_sta *sta;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
D_ASSOC("enter:\n");
- if (!ctx->ht.enabled)
+ if (!il->ht.enabled)
return;
- ctx->ht.protection =
+ il->ht.protection =
bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
- ctx->ht.non_gf_sta_present =
+ il->ht.non_gf_sta_present =
!!(bss_conf->
ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
@@ -5548,16 +5190,14 @@ il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
static inline void
il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
{
- struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
-
/*
* inform the ucode that there is no longer an
* association and that no more packets should be
* sent
*/
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ctx->staging.assoc_id = 0;
- il_commit_rxon(il, ctx);
+ il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il->staging.assoc_id = 0;
+ il_commit_rxon(il);
}
static void
@@ -5575,8 +5215,8 @@ il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
lockdep_assert_held(&il->mutex);
- if (!il->beacon_ctx) {
- IL_ERR("update beacon but no beacon context!\n");
+ if (!il->beacon_enabled) {
+ IL_ERR("update beacon with no beaconing enabled\n");
dev_kfree_skb(skb);
return;
}
@@ -5599,7 +5239,7 @@ il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return;
}
- il->cfg->ops->legacy->post_associate(il);
+ il->ops->post_associate(il);
}
void
@@ -5607,17 +5247,13 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf, u32 changes)
{
struct il_priv *il = hw->priv;
- struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
int ret;
- if (WARN_ON(!il->cfg->ops->legacy))
- return;
-
- D_MAC80211("changes = 0x%X\n", changes);
-
mutex_lock(&il->mutex);
+ D_MAC80211("enter: changes 0x%x\n", changes);
if (!il_is_alive(il)) {
+ D_MAC80211("leave - not alive\n");
mutex_unlock(&il->mutex);
return;
}
@@ -5626,21 +5262,17 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
unsigned long flags;
spin_lock_irqsave(&il->lock, flags);
- ctx->qos_data.qos_active = bss_conf->qos;
- il_update_qos(il, ctx);
+ il->qos_data.qos_active = bss_conf->qos;
+ il_update_qos(il);
spin_unlock_irqrestore(&il->lock, flags);
}
if (changes & BSS_CHANGED_BEACON_ENABLED) {
- /*
- * the add_interface code must make sure we only ever
- * have a single interface that could be beaconing at
- * any time.
- */
+ /* FIXME: can we remove beacon_enabled ? */
if (vif->bss_conf.enable_beacon)
- il->beacon_ctx = ctx;
+ il->beacon_enabled = true;
else
- il->beacon_ctx = NULL;
+ il->beacon_enabled = false;
}
if (changes & BSS_CHANGED_BSSID) {
@@ -5652,23 +5284,20 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
* below/in post_associate will fail.
*/
if (il_scan_cancel_timeout(il, 100)) {
- IL_WARN("Aborted scan still in progress after 100ms\n");
- D_MAC80211("leaving - scan abort failed.\n");
+ D_MAC80211("leave - scan abort failed\n");
mutex_unlock(&il->mutex);
return;
}
/* mac80211 only sets assoc when in STATION mode */
if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
- memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
+ memcpy(il->staging.bssid_addr, bss_conf->bssid,
ETH_ALEN);
/* currently needed in a few places */
memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
- } else {
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- }
-
+ } else
+ il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
}
/*
@@ -5682,21 +5311,21 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changes & BSS_CHANGED_ERP_PREAMBLE) {
D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
if (bss_conf->use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
}
if (changes & BSS_CHANGED_ERP_CTS_PROT) {
D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
- ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
+ il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
else
- ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+ il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
if (bss_conf->use_cts_prot)
- ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+ il->staging.flags |= RXON_FLG_SELF_CTS_EN;
else
- ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+ il->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
}
if (changes & BSS_CHANGED_BASIC_RATES) {
@@ -5706,12 +5335,12 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
* like this here:
*
if (A-band)
- ctx->staging.ofdm_basic_rates =
+ il->staging.ofdm_basic_rates =
bss_conf->basic_rates;
else
- ctx->staging.ofdm_basic_rates =
+ il->staging.ofdm_basic_rates =
bss_conf->basic_rates >> 4;
- ctx->staging.cck_basic_rates =
+ il->staging.cck_basic_rates =
bss_conf->basic_rates & 0xF;
*/
}
@@ -5719,55 +5348,52 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changes & BSS_CHANGED_HT) {
il_ht_conf(il, vif);
- if (il->cfg->ops->hcmd->set_rxon_chain)
- il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+ if (il->ops->set_rxon_chain)
+ il->ops->set_rxon_chain(il);
}
if (changes & BSS_CHANGED_ASSOC) {
D_MAC80211("ASSOC %d\n", bss_conf->assoc);
if (bss_conf->assoc) {
- il->timestamp = bss_conf->timestamp;
+ il->timestamp = bss_conf->last_tsf;
if (!il_is_rfkill(il))
- il->cfg->ops->legacy->post_associate(il);
+ il->ops->post_associate(il);
} else
il_set_no_assoc(il, vif);
}
- if (changes && il_is_associated_ctx(ctx) && bss_conf->aid) {
+ if (changes && il_is_associated(il) && bss_conf->aid) {
D_MAC80211("Changes (%#x) while associated\n", changes);
- ret = il_send_rxon_assoc(il, ctx);
+ ret = il_send_rxon_assoc(il);
if (!ret) {
/* Sync active_rxon with latest change. */
- memcpy((void *)&ctx->active, &ctx->staging,
+ memcpy((void *)&il->active, &il->staging,
sizeof(struct il_rxon_cmd));
}
}
if (changes & BSS_CHANGED_BEACON_ENABLED) {
if (vif->bss_conf.enable_beacon) {
- memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
+ memcpy(il->staging.bssid_addr, bss_conf->bssid,
ETH_ALEN);
memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
- il->cfg->ops->legacy->config_ap(il);
+ il->ops->config_ap(il);
} else
il_set_no_assoc(il, vif);
}
if (changes & BSS_CHANGED_IBSS) {
- ret =
- il->cfg->ops->legacy->manage_ibss_station(il, vif,
- bss_conf->
- ibss_joined);
+ ret = il->ops->manage_ibss_station(il, vif,
+ bss_conf->ibss_joined);
if (ret)
IL_ERR("failed to %s IBSS station %pM\n",
bss_conf->ibss_joined ? "add" : "remove",
bss_conf->bssid);
}
- mutex_unlock(&il->mutex);
-
D_MAC80211("leave\n");
+ mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_bss_info_changed);
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index abfa388588be..5f5017767b99 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -143,12 +143,6 @@ struct il_queue {
* space less than this */
};
-/* One for each TFD */
-struct il_tx_info {
- struct sk_buff *skb;
- struct il_rxon_context *ctx;
-};
-
/**
* struct il_tx_queue - Tx Queue for DMA
* @q: generic Rx/Tx queue descriptor
@@ -156,7 +150,7 @@ struct il_tx_info {
* @cmd: array of command/TX buffer pointers
* @meta: array of meta data for each command/tx buffer
* @dma_addr_cmd: physical address of cmd/tx buffer array
- * @txb: array of per-TFD driver data
+ * @skbs: array of per-TFD socket buffer pointers
* @time_stamp: time (in jiffies) of last read_ptr change
* @need_update: indicates need to update read/write idx
* @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
@@ -172,7 +166,7 @@ struct il_tx_queue {
void *tfds;
struct il_device_cmd **cmd;
struct il_cmd_meta *meta;
- struct il_tx_info *txb;
+ struct sk_buff **skbs;
unsigned long time_stamp;
u8 need_update;
u8 sched_retry;
@@ -431,12 +425,6 @@ struct il_eeprom_calib_info {
#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
-struct il_eeprom_ops {
- const u32 regulatory_bands[7];
- int (*acquire_semaphore) (struct il_priv *il);
- void (*release_semaphore) (struct il_priv *il);
-};
-
int il_eeprom_init(struct il_priv *il);
void il_eeprom_free(struct il_priv *il);
const u8 *il_eeprom_query_addr(const struct il_priv *il, size_t offset);
@@ -735,13 +723,12 @@ struct il_qos_info {
struct il_station_entry {
struct il_addsta_cmd sta;
struct il_tid_data tid[MAX_TID_COUNT];
- u8 used, ctxid;
+ u8 used;
struct il_hw_key keyinfo;
struct il_link_quality_cmd *lq;
};
struct il_station_priv_common {
- struct il_rxon_context *ctx;
u8 sta_id;
};
@@ -752,7 +739,6 @@ struct il_station_priv_common {
* space for us to put data into.
*/
struct il_vif_priv {
- struct il_rxon_context *ctx;
u8 ibss_bssid_sta_id;
};
@@ -816,6 +802,7 @@ struct il_sensitivity_ranges {
/**
* struct il_hw_params
+ * @bcast_id: f/w broadcast station ID
* @max_txq_num: Max # Tx queues supported
* @dma_chnl_num: Number of Tx DMA/FIFO channels
* @scd_bc_tbls_size: size of scheduler byte count tables
@@ -836,6 +823,7 @@ struct il_sensitivity_ranges {
* @struct il_sensitivity_ranges: range of sensitivity values
*/
struct il_hw_params {
+ u8 bcast_id;
u8 max_txq_num;
u8 dma_chnl_num;
u16 scd_bc_tbls_size;
@@ -968,26 +956,6 @@ enum il4965_chain_noise_state {
IL_CHAIN_NOISE_DONE,
};
-enum il4965_calib_enabled_state {
- IL_CALIB_DISABLED = 0, /* must be 0 */
- IL_CALIB_ENABLED = 1,
-};
-
-/*
- * enum il_calib
- * defines the order in which results of initial calibrations
- * should be sent to the runtime uCode
- */
-enum il_calib {
- IL_CALIB_MAX,
-};
-
-/* Opaque calibration results */
-struct il_calib_result {
- void *buf;
- size_t buf_len;
-};
-
enum ucode_type {
UCODE_NONE = 0,
UCODE_INIT,
@@ -1152,55 +1120,6 @@ struct il_force_reset {
struct il_rxon_context {
struct ieee80211_vif *vif;
-
- const u8 *ac_to_fifo;
- const u8 *ac_to_queue;
- u8 mcast_queue;
-
- /*
- * We could use the vif to indicate active, but we
- * also need it to be active during disabling when
- * we already removed the vif for type setting.
- */
- bool always_active, is_active;
-
- bool ht_need_multiple_chains;
-
- int ctxid;
-
- u32 interface_modes, exclusive_interface_modes;
- u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
-
- /*
- * We declare this const so it can only be
- * changed via explicit cast within the
- * routines that actually update the physical
- * hardware.
- */
- const struct il_rxon_cmd active;
- struct il_rxon_cmd staging;
-
- struct il_rxon_time_cmd timing;
-
- struct il_qos_info qos_data;
-
- u8 bcast_sta_id, ap_sta_id;
-
- u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
- u8 qos_cmd;
- u8 wep_key_cmd;
-
- struct il_wep_key wep_keys[WEP_KEYS_MAX];
- u8 key_mapping_keys;
-
- __le32 station_flags;
-
- struct {
- bool non_gf_sta_present;
- u8 protection;
- bool enabled, is_40mhz;
- u8 extension_chan_offset;
- } ht;
};
struct il_power_mgr {
@@ -1211,12 +1130,15 @@ struct il_power_mgr {
};
struct il_priv {
-
- /* ieee device used by generic ieee processing code */
struct ieee80211_hw *hw;
struct ieee80211_channel *ieee_channels;
struct ieee80211_rate *ieee_rates;
+
struct il_cfg *cfg;
+ const struct il_ops *ops;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ const struct il_debugfs_ops *debugfs_ops;
+#endif
/* temporary frame storage list */
struct list_head free_frames;
@@ -1253,9 +1175,6 @@ struct il_priv {
s32 temperature; /* degrees Kelvin */
s32 last_temperature;
- /* init calibration results */
- struct il_calib_result calib_results[IL_CALIB_MAX];
-
/* Scan related variables */
unsigned long scan_start;
unsigned long scan_start_tsf;
@@ -1304,7 +1223,28 @@ struct il_priv {
u8 ucode_write_complete; /* the image write is complete */
char firmware_name[25];
- struct il_rxon_context ctx;
+ struct ieee80211_vif *vif;
+
+ struct il_qos_info qos_data;
+
+ struct {
+ bool enabled;
+ bool is_40mhz;
+ bool non_gf_sta_present;
+ u8 protection;
+ u8 extension_chan_offset;
+ } ht;
+
+ /*
+ * We declare this const so it can only be
+ * changed via explicit cast within the
+ * routines that actually update the physical
+ * hardware.
+ */
+ const struct il_rxon_cmd active;
+ struct il_rxon_cmd staging;
+
+ struct il_rxon_time_cmd timing;
__le16 switch_channel;
@@ -1427,6 +1367,9 @@ struct il_priv {
u8 phy_calib_chain_noise_reset_cmd;
u8 phy_calib_chain_noise_gain_cmd;
+ u8 key_mapping_keys;
+ struct il_wep_key wep_keys[WEP_KEYS_MAX];
+
struct il_notif_stats stats;
#ifdef CONFIG_IWLEGACY_DEBUGFS
struct il_notif_stats accum_stats;
@@ -1449,7 +1392,7 @@ struct il_priv {
struct work_struct rx_replenish;
struct work_struct abort_scan;
- struct il_rxon_context *beacon_ctx;
+ bool beacon_enabled;
struct sk_buff *beacon_skb;
struct work_struct tx_flush;
@@ -1507,30 +1450,10 @@ il_txq_ctx_deactivate(struct il_priv *il, int txq_id)
clear_bit(txq_id, &il->txq_ctx_active_msk);
}
-static inline struct ieee80211_hdr *
-il_tx_queue_get_hdr(struct il_priv *il, int txq_id, int idx)
-{
- if (il->txq[txq_id].txb[idx].skb)
- return (struct ieee80211_hdr *)il->txq[txq_id].txb[idx].skb->
- data;
- return NULL;
-}
-
-static inline struct il_rxon_context *
-il_rxon_ctx_from_vif(struct ieee80211_vif *vif)
-{
- struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
-
- return vif_priv->ctx;
-}
-
-#define for_each_context(il, _ctx) \
- for (_ctx = &il->ctx; _ctx == &il->ctx; _ctx++)
-
static inline int
il_is_associated(struct il_priv *il)
{
- return (il->ctx.active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+ return (il->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
}
static inline int
@@ -1540,12 +1463,6 @@ il_is_any_associated(struct il_priv *il)
}
static inline int
-il_is_associated_ctx(struct il_rxon_context *ctx)
-{
- return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
-}
-
-static inline int
il_is_channel_valid(const struct il_channel_info *ch_info)
{
if (ch_info == NULL)
@@ -1613,25 +1530,6 @@ il_free_pages(struct il_priv *il, unsigned long page)
#define IL_RX_BUF_SIZE_4K (4 * 1024)
#define IL_RX_BUF_SIZE_8K (8 * 1024)
-struct il_hcmd_ops {
- int (*rxon_assoc) (struct il_priv *il, struct il_rxon_context *ctx);
- int (*commit_rxon) (struct il_priv *il, struct il_rxon_context *ctx);
- void (*set_rxon_chain) (struct il_priv *il,
- struct il_rxon_context *ctx);
-};
-
-struct il_hcmd_utils_ops {
- u16(*get_hcmd_size) (u8 cmd_id, u16 len);
- u16(*build_addsta_hcmd) (const struct il_addsta_cmd *cmd, u8 *data);
- int (*request_scan) (struct il_priv *il, struct ieee80211_vif *vif);
- void (*post_scan) (struct il_priv *il);
-};
-
-struct il_apm_ops {
- int (*init) (struct il_priv *il);
- void (*config) (struct il_priv *il);
-};
-
#ifdef CONFIG_IWLEGACY_DEBUGFS
struct il_debugfs_ops {
ssize_t(*rx_stats_read) (struct file *file, char __user *user_buf,
@@ -1644,13 +1542,7 @@ struct il_debugfs_ops {
};
#endif
-struct il_temp_ops {
- void (*temperature) (struct il_priv *il);
-};
-
-struct il_lib_ops {
- /* set hw dependent parameters */
- int (*set_hw_params) (struct il_priv *il);
+struct il_ops {
/* Handling TX */
void (*txq_update_byte_cnt_tbl) (struct il_priv *il,
struct il_tx_queue *txq,
@@ -1660,8 +1552,6 @@ struct il_lib_ops {
u16 len, u8 reset, u8 pad);
void (*txq_free_tfd) (struct il_priv *il, struct il_tx_queue *txq);
int (*txq_init) (struct il_priv *il, struct il_tx_queue *txq);
- /* setup Rx handler */
- void (*handler_setup) (struct il_priv *il);
/* alive notification after init uCode load */
void (*init_alive_start) (struct il_priv *il);
/* check validity of rtc data address */
@@ -1674,45 +1564,33 @@ struct il_lib_ops {
int (*set_channel_switch) (struct il_priv *il,
struct ieee80211_channel_switch *ch_switch);
/* power management */
- struct il_apm_ops apm_ops;
+ int (*apm_init) (struct il_priv *il);
- /* power */
+ /* tx power */
int (*send_tx_power) (struct il_priv *il);
void (*update_chain_flags) (struct il_priv *il);
/* eeprom operations */
- struct il_eeprom_ops eeprom_ops;
+ int (*eeprom_acquire_semaphore) (struct il_priv *il);
+ void (*eeprom_release_semaphore) (struct il_priv *il);
- /* temperature */
- struct il_temp_ops temp_ops;
-
-#ifdef CONFIG_IWLEGACY_DEBUGFS
- struct il_debugfs_ops debugfs_ops;
-#endif
+ int (*rxon_assoc) (struct il_priv *il);
+ int (*commit_rxon) (struct il_priv *il);
+ void (*set_rxon_chain) (struct il_priv *il);
-};
-
-struct il_led_ops {
- int (*cmd) (struct il_priv *il, struct il_led_cmd *led_cmd);
-};
+ u16(*get_hcmd_size) (u8 cmd_id, u16 len);
+ u16(*build_addsta_hcmd) (const struct il_addsta_cmd *cmd, u8 *data);
-struct il_legacy_ops {
+ int (*request_scan) (struct il_priv *il, struct ieee80211_vif *vif);
+ void (*post_scan) (struct il_priv *il);
void (*post_associate) (struct il_priv *il);
void (*config_ap) (struct il_priv *il);
/* station management */
int (*update_bcast_stations) (struct il_priv *il);
int (*manage_ibss_station) (struct il_priv *il,
struct ieee80211_vif *vif, bool add);
-};
-struct il_ops {
- const struct il_lib_ops *lib;
- const struct il_hcmd_ops *hcmd;
- const struct il_hcmd_utils_ops *utils;
- const struct il_led_ops *led;
- const struct il_nic_ops *nic;
- const struct il_legacy_ops *legacy;
- const struct ieee80211_ops *ieee80211_ops;
+ int (*send_led_cmd) (struct il_priv *il, struct il_led_cmd *led_cmd);
};
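/*
 * A minimal sketch, not part of this patch: with the nested lib/hcmd/utils/
 * led/legacy tables folded into one flat il_ops, a device family fills a
 * single table. The il_example_* names are placeholders for illustration.
 */
static int
il_example_commit_rxon(struct il_priv *il)
{
	/* a real driver builds and sends the RXON host command here */
	return 0;
}

static void
il_example_set_rxon_chain(struct il_priv *il)
{
	/* a real driver programs the RX chain configuration here */
}

static const struct il_ops il_example_ops = {
	.commit_rxon	= il_example_commit_rxon,
	.set_rxon_chain	= il_example_set_rxon_chain,
};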
struct il_mod_params {
@@ -1725,37 +1603,6 @@ struct il_mod_params {
int restart_fw; /* def: 1 = restart firmware */
};
-/*
- * @led_compensation: compensate on the led on/off time per HW according
- * to the deviation to achieve the desired led frequency.
- * The detail algorithm is described in common.c
- * @chain_noise_num_beacons: number of beacons used to compute chain noise
- * @wd_timeout: TX queues watchdog timeout
- * @temperature_kelvin: temperature report by uCode in kelvin
- * @ucode_tracing: support ucode continuous tracing
- * @sensitivity_calib_by_driver: driver has the capability to perform
- * sensitivity calibration operation
- * @chain_noise_calib_by_driver: driver has the capability to perform
- * chain noise calibration operation
- */
-struct il_base_params {
- int eeprom_size;
- int num_of_queues; /* def: HW dependent */
- int num_of_ampdu_queues; /* def: HW dependent */
- /* for il_apm_init() */
- u32 pll_cfg_val;
- bool set_l0s;
- bool use_bsm;
-
- u16 led_compensation;
- int chain_noise_num_beacons;
- unsigned int wd_timeout;
- bool temperature_kelvin;
- const bool ucode_tracing;
- const bool sensitivity_calib_by_driver;
- const bool chain_noise_calib_by_driver;
-};
-
#define IL_LED_SOLID 11
#define IL_DEF_LED_INTRVL cpu_to_le32(1000)
@@ -1821,7 +1668,6 @@ struct il_cfg {
unsigned int sku;
u16 eeprom_ver;
u16 eeprom_calib_ver;
- const struct il_ops *ops;
/* module based parameters which can be set from modprobe cmd */
const struct il_mod_params *mod_params;
/* params not likely to change within a device family */
@@ -1829,31 +1675,45 @@ struct il_cfg {
/* params likely to change within a device family */
u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
enum il_led_mode led_mode;
+
+ int eeprom_size;
+ int num_of_queues; /* def: HW dependent */
+ int num_of_ampdu_queues; /* def: HW dependent */
+ /* for il_apm_init() */
+ u32 pll_cfg_val;
+ bool set_l0s;
+ bool use_bsm;
+
+ u16 led_compensation;
+ int chain_noise_num_beacons;
+ unsigned int wd_timeout;
+ bool temperature_kelvin;
+ const bool ucode_tracing;
+ const bool sensitivity_calib_by_driver;
+ const bool chain_noise_calib_by_driver;
+
+ const u32 regulatory_bands[7];
};
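/*
 * A minimal sketch with hypothetical values: after il_base_params is folded
 * into il_cfg, a device configuration is declared at a single level and read
 * as il->cfg->wd_timeout, il->cfg->num_of_queues, and so on. il_example_cfg
 * and the numbers below are placeholders, not taken from this patch.
 */
static struct il_cfg il_example_cfg = {
	.eeprom_size		= 1024,
	.num_of_queues		= 16,
	.pll_cfg_val		= 0,
	.set_l0s		= true,
	.use_bsm		= false,
	.led_compensation	= 64,
	.chain_noise_num_beacons = 16,
	.wd_timeout		= 2000,
	.temperature_kelvin	= true,
};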
/***************************
* L i b *
***************************/
-struct ieee80211_hw *il_alloc_all(struct il_cfg *cfg);
int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u16 queue, const struct ieee80211_tx_queue_params *params);
int il_mac_tx_last_beacon(struct ieee80211_hw *hw);
-void il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
- int hw_decrypt);
-int il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx);
-int il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx);
-int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
- struct il_rxon_context *ctx);
-void il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
- enum ieee80211_band band, struct ieee80211_vif *vif);
+void il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt);
+int il_check_rxon_cmd(struct il_priv *il);
+int il_full_rxon_required(struct il_priv *il);
+int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch);
+void il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
+ struct ieee80211_vif *vif);
u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
-bool il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
+bool il_is_ht40_tx_allowed(struct il_priv *il,
struct ieee80211_sta_ht_cap *ht_cap);
-void il_connection_init_rx_config(struct il_priv *il,
- struct il_rxon_context *ctx);
+void il_connection_init_rx_config(struct il_priv *il);
void il_set_rate(struct il_priv *il);
int il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
u32 decrypt_res, struct ieee80211_rx_status *stats);
@@ -1864,60 +1724,24 @@ void il_mac_remove_interface(struct ieee80211_hw *hw,
int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum nl80211_iftype newtype, bool newp2p);
int il_alloc_txq_mem(struct il_priv *il);
-void il_txq_mem(struct il_priv *il);
+void il_free_txq_mem(struct il_priv *il);
#ifdef CONFIG_IWLEGACY_DEBUGFS
-int il_alloc_traffic_mem(struct il_priv *il);
-void il_free_traffic_mem(struct il_priv *il);
-void il_reset_traffic_log(struct il_priv *il);
-void il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
- struct ieee80211_hdr *header);
-void il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
- struct ieee80211_hdr *header);
-const char *il_get_mgmt_string(int cmd);
-const char *il_get_ctrl_string(int cmd);
-void il_clear_traffic_stats(struct il_priv *il);
-void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
+extern void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
#else
-static inline int
-il_alloc_traffic_mem(struct il_priv *il)
-{
- return 0;
-}
-
-static inline void
-il_free_traffic_mem(struct il_priv *il)
-{
-}
-
-static inline void
-il_reset_traffic_log(struct il_priv *il)
-{
-}
-
-static inline void
-il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
- struct ieee80211_hdr *header)
-{
-}
-
-static inline void
-il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
- struct ieee80211_hdr *header)
-{
-}
-
static inline void
il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
{
}
#endif
+
/*****************************************************
- * RX handlers.
- * **************************************************/
+ * Handlers
+ ***************************************************/
void il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb);
void il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb);
void il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb);
+void il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb);
/*****************************************************
* RX
@@ -1928,25 +1752,20 @@ int il_rx_queue_alloc(struct il_priv *il);
void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q);
int il_rx_queue_space(const struct il_rx_queue *q);
void il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb);
-/* Handlers */
+
void il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb);
void il_recover_from_stats(struct il_priv *il, struct il_rx_pkt *pkt);
void il_chswitch_done(struct il_priv *il, bool is_success);
-void il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb);
-
-/* TX helpers */
/*****************************************************
* TX
******************************************************/
-void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
-int il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
- u32 txq_id);
-void il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq,
- int slots_num, u32 txq_id);
-void il_tx_queue_unmap(struct il_priv *il, int txq_id);
-void il_tx_queue_free(struct il_priv *il, int txq_id);
-void il_setup_watchdog(struct il_priv *il);
+extern void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
+extern int il_tx_queue_init(struct il_priv *il, u32 txq_id);
+extern void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
+extern void il_tx_queue_unmap(struct il_priv *il, int txq_id);
+extern void il_tx_queue_free(struct il_priv *il, int txq_id);
+extern void il_setup_watchdog(struct il_priv *il);
/*****************************************************
* TX power
****************************************************/
@@ -1956,7 +1775,7 @@ int il_set_tx_power(struct il_priv *il, s8 tx_power, bool force);
* Rate
******************************************************************************/
-u8 il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx);
+u8 il_get_lowest_plcp(struct il_priv *il);
/*******************************************************************************
* Scanning
@@ -2043,10 +1862,10 @@ extern const struct dev_pm_ops il_pm_ops;
******************************************************/
void il4965_dump_nic_error_log(struct il_priv *il);
#ifdef CONFIG_IWLEGACY_DEBUG
-void il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx);
+void il_print_rx_config_cmd(struct il_priv *il);
#else
static inline void
-il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+il_print_rx_config_cmd(struct il_priv *il)
{
}
#endif
@@ -2064,7 +1883,7 @@ void il_free_geos(struct il_priv *il);
#define S_HCMD_ACTIVE 0 /* host command in progress */
/* 1 is unused (used to be S_HCMD_SYNC_ACTIVE) */
#define S_INT_ENABLED 2
-#define S_RF_KILL_HW 3
+#define S_RFKILL 3
#define S_CT_KILL 4
#define S_INIT 5
#define S_ALIVE 6
@@ -2103,15 +1922,9 @@ il_is_init(struct il_priv *il)
}
static inline int
-il_is_rfkill_hw(struct il_priv *il)
-{
- return test_bit(S_RF_KILL_HW, &il->status);
-}
-
-static inline int
il_is_rfkill(struct il_priv *il)
{
- return il_is_rfkill_hw(il);
+ return test_bit(S_RFKILL, &il->status);
}
static inline int
@@ -2132,20 +1945,23 @@ il_is_ready_rf(struct il_priv *il)
extern void il_send_bt_config(struct il_priv *il);
extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
-void il_apm_stop(struct il_priv *il);
+extern void il_apm_stop(struct il_priv *il);
+extern void _il_apm_stop(struct il_priv *il);
+
int il_apm_init(struct il_priv *il);
-int il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx);
+int il_send_rxon_timing(struct il_priv *il);
+
static inline int
-il_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+il_send_rxon_assoc(struct il_priv *il)
{
- return il->cfg->ops->hcmd->rxon_assoc(il, ctx);
+ return il->ops->rxon_assoc(il);
}
static inline int
-il_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+il_commit_rxon(struct il_priv *il)
{
- return il->cfg->ops->hcmd->commit_rxon(il, ctx);
+ return il->ops->commit_rxon(il);
}
static inline const struct ieee80211_supported_band *
@@ -2166,7 +1982,7 @@ irqreturn_t il_isr(int irq, void *data);
extern void il_set_bit(struct il_priv *p, u32 r, u32 m);
extern void il_clear_bit(struct il_priv *p, u32 r, u32 m);
-extern int _il_grab_nic_access(struct il_priv *il);
+extern bool _il_grab_nic_access(struct il_priv *il);
extern int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
extern int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
extern u32 il_rd_prph(struct il_priv *il, u32 reg);
@@ -2177,20 +1993,20 @@ extern void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
static inline void
_il_write8(struct il_priv *il, u32 ofs, u8 val)
{
- iowrite8(val, il->hw_base + ofs);
+ writeb(val, il->hw_base + ofs);
}
#define il_write8(il, ofs, val) _il_write8(il, ofs, val)
static inline void
_il_wr(struct il_priv *il, u32 ofs, u32 val)
{
- iowrite32(val, il->hw_base + ofs);
+ writel(val, il->hw_base + ofs);
}
static inline u32
_il_rd(struct il_priv *il, u32 ofs)
{
- return ioread32(il->hw_base + ofs);
+ return readl(il->hw_base + ofs);
}
static inline void
@@ -2209,6 +2025,13 @@ static inline void
_il_release_nic_access(struct il_priv *il)
{
_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+	/*
+	 * Above we read the CSR_GP_CNTRL register, which flushes any
+	 * previous writes, but we still want the write that clears the
+	 * MAC_ACCESS_REQ bit to be performed on the PCI bus before any
+	 * other writes scheduled on different CPUs (after we drop
+	 * reg_lock).
+	 */
+ mmiowb();
}
static inline u32
@@ -2231,7 +2054,7 @@ il_wr(struct il_priv *il, u32 reg, u32 value)
unsigned long reg_flags;
spin_lock_irqsave(&il->reg_lock, reg_flags);
- if (!_il_grab_nic_access(il)) {
+ if (likely(_il_grab_nic_access(il))) {
_il_wr(il, reg, value);
_il_release_nic_access(il);
}
@@ -2242,7 +2065,6 @@ static inline u32
_il_rd_prph(struct il_priv *il, u32 reg)
{
_il_wr(il, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
- rmb();
return _il_rd(il, HBUS_TARG_PRPH_RDAT);
}
@@ -2250,7 +2072,6 @@ static inline void
_il_wr_prph(struct il_priv *il, u32 addr, u32 val)
{
_il_wr(il, HBUS_TARG_PRPH_WADDR, ((addr & 0x0000FFFF) | (3 << 24)));
- wmb();
_il_wr(il, HBUS_TARG_PRPH_WDAT, val);
}
@@ -2260,9 +2081,10 @@ il_set_bits_prph(struct il_priv *il, u32 reg, u32 mask)
unsigned long reg_flags;
spin_lock_irqsave(&il->reg_lock, reg_flags);
- _il_grab_nic_access(il);
- _il_wr_prph(il, reg, (_il_rd_prph(il, reg) | mask));
- _il_release_nic_access(il);
+ if (likely(_il_grab_nic_access(il))) {
+ _il_wr_prph(il, reg, (_il_rd_prph(il, reg) | mask));
+ _il_release_nic_access(il);
+ }
spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
@@ -2272,9 +2094,10 @@ il_set_bits_mask_prph(struct il_priv *il, u32 reg, u32 bits, u32 mask)
unsigned long reg_flags;
spin_lock_irqsave(&il->reg_lock, reg_flags);
- _il_grab_nic_access(il);
- _il_wr_prph(il, reg, ((_il_rd_prph(il, reg) & mask) | bits));
- _il_release_nic_access(il);
+ if (likely(_il_grab_nic_access(il))) {
+ _il_wr_prph(il, reg, ((_il_rd_prph(il, reg) & mask) | bits));
+ _il_release_nic_access(il);
+ }
spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
@@ -2285,10 +2108,11 @@ il_clear_bits_prph(struct il_priv *il, u32 reg, u32 mask)
u32 val;
spin_lock_irqsave(&il->reg_lock, reg_flags);
- _il_grab_nic_access(il);
- val = _il_rd_prph(il, reg);
- _il_wr_prph(il, reg, (val & ~mask));
- _il_release_nic_access(il);
+ if (likely(_il_grab_nic_access(il))) {
+ val = _il_rd_prph(il, reg);
+ _il_wr_prph(il, reg, (val & ~mask));
+ _il_release_nic_access(il);
+ }
spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
@@ -2303,23 +2127,22 @@ il_clear_bits_prph(struct il_priv *il, u32 reg, u32 mask)
(this is for the IBSS BSSID stations) */
#define IL_STA_BCAST BIT(4) /* this station is the special bcast station */
-void il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx);
-void il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx);
+void il_restore_stations(struct il_priv *il);
+void il_clear_ucode_stations(struct il_priv *il);
void il_dealloc_bcast_stations(struct il_priv *il);
int il_get_free_ucode_key_idx(struct il_priv *il);
int il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags);
-int il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
- const u8 *addr, bool is_ap,
+int il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
struct ieee80211_sta *sta, u8 *sta_id_r);
int il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr);
int il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
- const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
+u8 il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta);
-int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
- struct il_link_quality_cmd *lq, u8 flags, bool init);
+int il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq,
+ u8 flags, bool init);
/**
* il_clear_driver_stations - clear knowledge of all stations from driver
@@ -2334,24 +2157,11 @@ static inline void
il_clear_driver_stations(struct il_priv *il)
{
unsigned long flags;
- struct il_rxon_context *ctx = &il->ctx;
spin_lock_irqsave(&il->sta_lock, flags);
memset(il->stations, 0, sizeof(il->stations));
il->num_stations = 0;
-
il->ucode_key_table = 0;
-
- /*
- * Remove all key information that is not stored as part
- * of station information since mac80211 may not have had
- * a chance to remove all the keys. When device is
- * reconfigured by mac80211 after an error all keys will
- * be reconfigured.
- */
- memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
- ctx->key_mapping_keys = 0;
-
spin_unlock_irqrestore(&il->sta_lock, flags);
}
@@ -2376,13 +2186,12 @@ il_sta_id(struct ieee80211_sta *sta)
* inline wraps that pattern.
*/
static inline int
-il_sta_id_or_broadcast(struct il_priv *il, struct il_rxon_context *context,
- struct ieee80211_sta *sta)
+il_sta_id_or_broadcast(struct il_priv *il, struct ieee80211_sta *sta)
{
int sta_id;
if (!sta)
- return context->bcast_sta_id;
+ return il->hw_params.bcast_id;
sta_id = il_sta_id(sta);
@@ -2565,10 +2374,10 @@ struct il_rb_status {
__le32 __unused; /* 3945 only */
} __packed;
-#define TFD_QUEUE_SIZE_MAX (256)
-#define TFD_QUEUE_SIZE_BC_DUP (64)
+#define TFD_QUEUE_SIZE_MAX 256
+#define TFD_QUEUE_SIZE_BC_DUP 64
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
-#define IL_TX_DMA_MASK DMA_BIT_MASK(36)
+#define IL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IL_NUM_OF_TBS 20
static inline u8
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
index b1b8926a9c7b..229849150aac 100644
--- a/drivers/net/wireless/iwlegacy/debug.c
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -31,6 +31,101 @@
#include "common.h"
+void
+il_clear_traffic_stats(struct il_priv *il)
+{
+ memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
+ memset(&il->rx_stats, 0, sizeof(struct traffic_stats));
+}
+
+/*
+ * il_update_stats records every MGMT, CTRL and DATA packet for both TX
+ * and RX.  The tx_stats/rx_stats debugfs files display these counters.
+ */
+void
+il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
+{
+ struct traffic_stats *stats;
+
+ if (is_tx)
+ stats = &il->tx_stats;
+ else
+ stats = &il->rx_stats;
+
+ if (ieee80211_is_mgmt(fc)) {
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
+ stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
+ stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
+ stats->mgmt[MANAGEMENT_PROBE_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+ stats->mgmt[MANAGEMENT_PROBE_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_BEACON):
+ stats->mgmt[MANAGEMENT_BEACON]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ATIM):
+ stats->mgmt[MANAGEMENT_ATIM]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+ stats->mgmt[MANAGEMENT_DISASSOC]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ stats->mgmt[MANAGEMENT_AUTH]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ stats->mgmt[MANAGEMENT_DEAUTH]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ACTION):
+ stats->mgmt[MANAGEMENT_ACTION]++;
+ break;
+ }
+ } else if (ieee80211_is_ctl(fc)) {
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
+ stats->ctrl[CONTROL_BACK_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_BACK):
+ stats->ctrl[CONTROL_BACK]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
+ stats->ctrl[CONTROL_PSPOLL]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_RTS):
+ stats->ctrl[CONTROL_RTS]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CTS):
+ stats->ctrl[CONTROL_CTS]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ACK):
+ stats->ctrl[CONTROL_ACK]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CFEND):
+ stats->ctrl[CONTROL_CFEND]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
+ stats->ctrl[CONTROL_CFENDACK]++;
+ break;
+ }
+ } else {
+ /* data */
+ stats->data_cnt++;
+ stats->data_bytes += len;
+ }
+}
+EXPORT_SYMBOL(il_update_stats);
+
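For orientation, a minimal caller sketch (an illustration only, not part of this patch; the real call sites are elsewhere in the driver): feeding a received frame into these counters, assuming skb->data starts at the 802.11 header and <linux/ieee80211.h> is included, looks roughly like:

	static void example_count_rx_frame(struct il_priv *il, struct sk_buff *skb)
	{
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

		/* false = RX direction; the length feeds the data byte counter */
		il_update_stats(il, false, hdr->frame_control, skb->len);
	}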
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
if (!debugfs_create_file(#name, mode, parent, il, \
@@ -98,6 +193,46 @@ static const struct file_operations il_dbgfs_##name##_ops = { \
.llseek = generic_file_llseek, \
};
+static const char *
+il_get_mgmt_string(int cmd)
+{
+ switch (cmd) {
+ IL_CMD(MANAGEMENT_ASSOC_REQ);
+ IL_CMD(MANAGEMENT_ASSOC_RESP);
+ IL_CMD(MANAGEMENT_REASSOC_REQ);
+ IL_CMD(MANAGEMENT_REASSOC_RESP);
+ IL_CMD(MANAGEMENT_PROBE_REQ);
+ IL_CMD(MANAGEMENT_PROBE_RESP);
+ IL_CMD(MANAGEMENT_BEACON);
+ IL_CMD(MANAGEMENT_ATIM);
+ IL_CMD(MANAGEMENT_DISASSOC);
+ IL_CMD(MANAGEMENT_AUTH);
+ IL_CMD(MANAGEMENT_DEAUTH);
+ IL_CMD(MANAGEMENT_ACTION);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+static const char *
+il_get_ctrl_string(int cmd)
+{
+ switch (cmd) {
+ IL_CMD(CONTROL_BACK_REQ);
+ IL_CMD(CONTROL_BACK);
+ IL_CMD(CONTROL_PSPOLL);
+ IL_CMD(CONTROL_RTS);
+ IL_CMD(CONTROL_CTS);
+ IL_CMD(CONTROL_ACK);
+ IL_CMD(CONTROL_CFEND);
+ IL_CMD(CONTROL_CFENDACK);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
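A note on the IL_CMD macro used by the two lookup helpers above: it is assumed to be the usual case-stringify helper defined elsewhere in common.h, roughly

	#define IL_CMD(x) case x: return #x

so each IL_CMD(MANAGEMENT_...) / IL_CMD(CONTROL_...) line expands to a switch case returning the enumerator's name.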
static ssize_t
il_dbgfs_tx_stats_read(struct file *file, char __user *user_buf, size_t count,
loff_t *ppos)
@@ -361,7 +496,7 @@ il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
const u8 *ptr;
char *buf;
u16 eeprom_ver;
- size_t eeprom_len = il->cfg->base_params->eeprom_size;
+ size_t eeprom_len = il->cfg->eeprom_size;
buf_size = 4 * eeprom_len + 256;
if (eeprom_len % 16) {
@@ -495,8 +630,8 @@ il_dbgfs_status_read(struct file *file, char __user *user_buf, size_t count,
scnprintf(buf + pos, bufsz - pos, "S_INT_ENABLED:\t %d\n",
test_bit(S_INT_ENABLED, &il->status));
pos +=
- scnprintf(buf + pos, bufsz - pos, "S_RF_KILL_HW:\t %d\n",
- test_bit(S_RF_KILL_HW, &il->status));
+ scnprintf(buf + pos, bufsz - pos, "S_RFKILL:\t %d\n",
+ test_bit(S_RFKILL, &il->status));
pos +=
scnprintf(buf + pos, bufsz - pos, "S_CT_KILL:\t\t %d\n",
test_bit(S_CT_KILL, &il->status));
@@ -644,12 +779,10 @@ il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count,
loff_t *ppos)
{
struct il_priv *il = file->private_data;
- struct il_rxon_context *ctx = &il->ctx;
int pos = 0, i;
char buf[256];
const size_t bufsz = sizeof(buf);
- pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", ctx->ctxid);
for (i = 0; i < AC_NUM; i++) {
pos +=
scnprintf(buf + pos, bufsz - pos,
@@ -657,10 +790,10 @@ il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count,
pos +=
scnprintf(buf + pos, bufsz - pos,
"AC[%d]\t%u\t%u\t%u\t%u\n", i,
- ctx->qos_data.def_qos_parm.ac[i].cw_min,
- ctx->qos_data.def_qos_parm.ac[i].cw_max,
- ctx->qos_data.def_qos_parm.ac[i].aifsn,
- ctx->qos_data.def_qos_parm.ac[i].edca_txop);
+ il->qos_data.def_qos_parm.ac[i].cw_min,
+ il->qos_data.def_qos_parm.ac[i].cw_max,
+ il->qos_data.def_qos_parm.ac[i].aifsn,
+ il->qos_data.def_qos_parm.ac[i].edca_txop);
}
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
@@ -717,112 +850,6 @@ DEBUGFS_READ_FILE_OPS(qos);
DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
static ssize_t
-il_dbgfs_traffic_log_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct il_priv *il = file->private_data;
- int pos = 0, ofs = 0;
- int cnt = 0, entry;
- struct il_tx_queue *txq;
- struct il_queue *q;
- struct il_rx_queue *rxq = &il->rxq;
- char *buf;
- int bufsz =
- ((IL_TRAFFIC_ENTRIES * IL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
- (il->cfg->base_params->num_of_queues * 32 * 8) + 400;
- const u8 *ptr;
- ssize_t ret;
-
- if (!il->txq) {
- IL_ERR("txq not ready\n");
- return -EAGAIN;
- }
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IL_ERR("Can not allocate buffer\n");
- return -ENOMEM;
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
- for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
- txq = &il->txq[cnt];
- q = &txq->q;
- pos +=
- scnprintf(buf + pos, bufsz - pos,
- "q[%d]: read_ptr: %u, write_ptr: %u\n", cnt,
- q->read_ptr, q->write_ptr);
- }
- if (il->tx_traffic && (il_debug_level & IL_DL_TX)) {
- ptr = il->tx_traffic;
- pos +=
- scnprintf(buf + pos, bufsz - pos, "Tx Traffic idx: %u\n",
- il->tx_traffic_idx);
- for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
- for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
- entry++, ofs += 16) {
- pos +=
- scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
- ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
- buf + pos, bufsz - pos, 0);
- pos += strlen(buf + pos);
- if (bufsz - pos > 0)
- buf[pos++] = '\n';
- }
- }
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
- pos +=
- scnprintf(buf + pos, bufsz - pos, "read: %u, write: %u\n",
- rxq->read, rxq->write);
-
- if (il->rx_traffic && (il_debug_level & IL_DL_RX)) {
- ptr = il->rx_traffic;
- pos +=
- scnprintf(buf + pos, bufsz - pos, "Rx Traffic idx: %u\n",
- il->rx_traffic_idx);
- for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
- for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
- entry++, ofs += 16) {
- pos +=
- scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
- ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
- buf + pos, bufsz - pos, 0);
- pos += strlen(buf + pos);
- if (bufsz - pos > 0)
- buf[pos++] = '\n';
- }
- }
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t
-il_dbgfs_traffic_log_write(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct il_priv *il = file->private_data;
- char buf[8];
- int buf_size;
- int traffic_log;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &traffic_log) != 1)
- return -EFAULT;
- if (traffic_log == 0)
- il_reset_traffic_log(il);
-
- return count;
-}
-
-static ssize_t
il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count,
loff_t *ppos)
{
@@ -835,7 +862,7 @@ il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count,
int cnt;
int ret;
const size_t bufsz =
- sizeof(char) * 64 * il->cfg->base_params->num_of_queues;
+ sizeof(char) * 64 * il->cfg->num_of_queues;
if (!il->txq) {
IL_ERR("txq not ready\n");
@@ -903,8 +930,8 @@ il_dbgfs_ucode_rx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct il_priv *il = file->private_data;
- return il->cfg->ops->lib->debugfs_ops.rx_stats_read(file, user_buf,
- count, ppos);
+
+ return il->debugfs_ops->rx_stats_read(file, user_buf, count, ppos);
}
static ssize_t
@@ -912,8 +939,8 @@ il_dbgfs_ucode_tx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct il_priv *il = file->private_data;
- return il->cfg->ops->lib->debugfs_ops.tx_stats_read(file, user_buf,
- count, ppos);
+
+ return il->debugfs_ops->tx_stats_read(file, user_buf, count, ppos);
}
static ssize_t
@@ -921,8 +948,8 @@ il_dbgfs_ucode_general_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct il_priv *il = file->private_data;
- return il->cfg->ops->lib->debugfs_ops.general_stats_read(file, user_buf,
- count, ppos);
+
+ return il->debugfs_ops->general_stats_read(file, user_buf, count, ppos);
}
static ssize_t
@@ -1153,7 +1180,7 @@ il_dbgfs_rxon_flags_read(struct file *file, char __user *user_buf,
int len = 0;
char buf[20];
- len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.flags));
+ len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.flags));
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -1167,7 +1194,7 @@ il_dbgfs_rxon_filter_flags_read(struct file *file, char __user *user_buf,
char buf[20];
len =
- sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.filter_flags));
+ sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.filter_flags));
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -1180,8 +1207,8 @@ il_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count,
int pos = 0;
ssize_t ret = -EFAULT;
- if (il->cfg->ops->lib->dump_fh) {
- ret = pos = il->cfg->ops->lib->dump_fh(il, &buf, true);
+ if (il->ops->dump_fh) {
+ ret = pos = il->ops->dump_fh(il, &buf, true);
if (buf) {
ret =
simple_read_from_buffer(user_buf, count, ppos, buf,
@@ -1298,14 +1325,13 @@ il_dbgfs_wd_timeout_write(struct file *file, const char __user *user_buf,
if (timeout < 0 || timeout > IL_MAX_WD_TIMEOUT)
timeout = IL_DEF_WD_TIMEOUT;
- il->cfg->base_params->wd_timeout = timeout;
+ il->cfg->wd_timeout = timeout;
il_setup_watchdog(il);
return count;
}
DEBUGFS_READ_FILE_OPS(rx_stats);
DEBUGFS_READ_FILE_OPS(tx_stats);
-DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
@@ -1359,7 +1385,6 @@ il_dbgfs_register(struct il_priv *il, const char *name)
DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(rx_stats, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(tx_stats, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
@@ -1372,17 +1397,17 @@ il_dbgfs_register(struct il_priv *il, const char *name)
DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
- if (il->cfg->base_params->sensitivity_calib_by_driver)
+ if (il->cfg->sensitivity_calib_by_driver)
DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
- if (il->cfg->base_params->chain_noise_calib_by_driver)
+ if (il->cfg->chain_noise_calib_by_driver)
DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
- if (il->cfg->base_params->sensitivity_calib_by_driver)
+ if (il->cfg->sensitivity_calib_by_driver)
DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
&il->disable_sens_cal);
- if (il->cfg->base_params->chain_noise_calib_by_driver)
+ if (il->cfg->chain_noise_calib_by_driver)
DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
&il->disable_chain_noise_cal);
DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal);
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index ae08498dfcad..2fe62730dddd 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,6 +1,6 @@
config IWLWIFI
tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
- depends on PCI && MAC80211
+ depends on PCI && MAC80211 && HAS_IOMEM
select FW_LOADER
select NEW_LEDS
select LEDS_CLASS
@@ -127,3 +127,12 @@ config IWLWIFI_P2P
support when it is loaded.
Say Y only if you want to experiment with P2P.
+
+config IWLWIFI_EXPERIMENTAL_MFP
+ bool "support MFP (802.11w) even if uCode doesn't advertise"
+ depends on IWLWIFI
+ help
+	  This option enables experimental MFP (802.11w) support
+ even if the microcode doesn't advertise it.
+
+ Say Y only if you want to experiment with MFP.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 9dc84a7354db..85d163ed3db1 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,7 +1,7 @@
# WIFI
obj-$(CONFIG_IWLWIFI) += iwlwifi.o
iwlwifi-objs := iwl-agn.o iwl-agn-rs.o iwl-mac80211.o
-iwlwifi-objs += iwl-ucode.o iwl-agn-tx.o
+iwlwifi-objs += iwl-ucode.o iwl-agn-tx.o iwl-debug.o
iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
@@ -13,7 +13,8 @@ iwlwifi-objs += iwl-6000.o
iwlwifi-objs += iwl-1000.o
iwlwifi-objs += iwl-2000.o
iwlwifi-objs += iwl-pci.o
-iwlwifi-objs += iwl-trans.o
+iwlwifi-objs += iwl-drv.o
+iwlwifi-objs += iwl-notif-wait.o
iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 1ef7bfc2ab25..5b0d888f746b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -43,6 +43,7 @@
#include "iwl-agn-hw.h"
#include "iwl-shared.h"
#include "iwl-cfg.h"
+#include "iwl-prph.h"
/* Highest firmware API version supported */
#define IWL1000_UCODE_API_MAX 6
@@ -84,20 +85,19 @@ static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
static void iwl1000_nic_config(struct iwl_priv *priv)
{
/* set CSR_HW_CONFIG_REG for uCode use */
- iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
/* Setting digital SVR for 1000 card to 1.32V */
/* locking is acquired in iwl_set_bits_mask_prph() function */
- iwl_set_bits_mask_prph(bus(priv), APMG_DIGITAL_SVR_REG,
+ iwl_set_bits_mask_prph(trans(priv), APMG_DIGITAL_SVR_REG,
APMG_SVR_DIGITAL_VOLTAGE_1_32,
~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
}
-static struct iwl_sensitivity_ranges iwl1000_sensitivity = {
+static const struct iwl_sensitivity_ranges iwl1000_sensitivity = {
.min_nrg_cck = 95,
- .max_nrg_cck = 0, /* not used, set to 0 */
.auto_corr_min_ofdm = 90,
.auto_corr_min_ofdm_mrc = 170,
.auto_corr_min_ofdm_x1 = 120,
@@ -120,36 +120,22 @@ static struct iwl_sensitivity_ranges iwl1000_sensitivity = {
.nrg_th_cca = 62,
};
-static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
+static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
{
- if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
- iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- cfg(priv)->base_params->num_of_queues =
- iwlagn_mod_params.num_of_queues;
-
- hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
-
- hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
- hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
-
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ hw_params(priv).tx_chains_num =
+ num_of_ant(hw_params(priv).valid_tx_ant);
if (cfg(priv)->rx_with_siso_diversity)
hw_params(priv).rx_chains_num = 1;
else
hw_params(priv).rx_chains_num =
- num_of_ant(cfg(priv)->valid_rx_ant);
- hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
- hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
+ num_of_ant(hw_params(priv).valid_rx_ant);
iwl1000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
hw_params(priv).sens = &iwl1000_sensitivity;
-
- return 0;
}
static struct iwl_lib_ops iwl1000_lib = {
@@ -169,7 +155,7 @@ static struct iwl_lib_ops iwl1000_lib = {
.temperature = iwlagn_temperature,
};
-static struct iwl_base_params iwl1000_base_params = {
+static const struct iwl_base_params iwl1000_base_params = {
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
@@ -184,7 +170,8 @@ static struct iwl_base_params iwl1000_base_params = {
.max_event_log_size = 128,
.wd_disable = true,
};
-static struct iwl_ht_params iwl1000_ht_params = {
+
+static const struct iwl_ht_params iwl1000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
.smps_mode = IEEE80211_SMPS_DYNAMIC,
@@ -195,19 +182,21 @@ static struct iwl_ht_params iwl1000_ht_params = {
.ucode_api_max = IWL1000_UCODE_API_MAX, \
.ucode_api_ok = IWL1000_UCODE_API_OK, \
.ucode_api_min = IWL1000_UCODE_API_MIN, \
+ .max_inst_size = IWLAGN_RTC_INST_SIZE, \
+ .max_data_size = IWLAGN_RTC_DATA_SIZE, \
.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
.lib = &iwl1000_lib, \
.base_params = &iwl1000_base_params, \
.led_mode = IWL_LED_BLINK
-struct iwl_cfg iwl1000_bgn_cfg = {
+const struct iwl_cfg iwl1000_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
IWL_DEVICE_1000,
.ht_params = &iwl1000_ht_params,
};
-struct iwl_cfg iwl1000_bg_cfg = {
+const struct iwl_cfg iwl1000_bg_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
IWL_DEVICE_1000,
};
@@ -217,6 +206,8 @@ struct iwl_cfg iwl1000_bg_cfg = {
.ucode_api_max = IWL100_UCODE_API_MAX, \
.ucode_api_ok = IWL100_UCODE_API_OK, \
.ucode_api_min = IWL100_UCODE_API_MIN, \
+ .max_inst_size = IWLAGN_RTC_INST_SIZE, \
+ .max_data_size = IWLAGN_RTC_DATA_SIZE, \
.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
.lib = &iwl1000_lib, \
@@ -224,13 +215,13 @@ struct iwl_cfg iwl1000_bg_cfg = {
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true
-struct iwl_cfg iwl100_bgn_cfg = {
+const struct iwl_cfg iwl100_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
IWL_DEVICE_100,
.ht_params = &iwl1000_ht_params,
};
-struct iwl_cfg iwl100_bg_cfg = {
+const struct iwl_cfg iwl100_bg_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 100 BG",
IWL_DEVICE_100,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 094693328dbb..5635b9e2c69e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -87,13 +87,12 @@ static void iwl2000_nic_config(struct iwl_priv *priv)
iwl_rf_config(priv);
if (cfg(priv)->iq_invert)
- iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
+ iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
}
-static struct iwl_sensitivity_ranges iwl2000_sensitivity = {
+static const struct iwl_sensitivity_ranges iwl2000_sensitivity = {
.min_nrg_cck = 97,
- .max_nrg_cck = 0, /* not used, set to 0 */
.auto_corr_min_ofdm = 80,
.auto_corr_min_ofdm_mrc = 128,
.auto_corr_min_ofdm_x1 = 105,
@@ -116,36 +115,22 @@ static struct iwl_sensitivity_ranges iwl2000_sensitivity = {
.nrg_th_cca = 62,
};
-static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
+static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
{
- if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
- iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- cfg(priv)->base_params->num_of_queues =
- iwlagn_mod_params.num_of_queues;
-
- hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
-
- hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
- hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;
-
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ hw_params(priv).tx_chains_num =
+ num_of_ant(hw_params(priv).valid_tx_ant);
if (cfg(priv)->rx_with_siso_diversity)
hw_params(priv).rx_chains_num = 1;
else
hw_params(priv).rx_chains_num =
- num_of_ant(cfg(priv)->valid_rx_ant);
- hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
- hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
+ num_of_ant(hw_params(priv).valid_rx_ant);
iwl2000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
hw_params(priv).sens = &iwl2000_sensitivity;
-
- return 0;
}
static struct iwl_lib_ops iwl2000_lib = {
@@ -161,16 +146,13 @@ static struct iwl_lib_ops iwl2000_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REGULATORY_BAND_NO_HT40,
},
- .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
+ .enhanced_txpower = true,
},
.temperature = iwlagn_temperature,
};
static struct iwl_lib_ops iwl2030_lib = {
.set_hw_params = iwl2000_hw_set_hw_params,
- .bt_rx_handler_setup = iwlagn_bt_rx_handler_setup,
- .bt_setup_deferred_work = iwlagn_bt_setup_deferred_work,
- .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
.nic_config = iwl2000_nic_config,
.eeprom_ops = {
.regulatory_bands = {
@@ -182,12 +164,12 @@ static struct iwl_lib_ops iwl2030_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REGULATORY_BAND_NO_HT40,
},
- .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
+ .enhanced_txpower = true,
},
.temperature = iwlagn_temperature,
};
-static struct iwl_base_params iwl2000_base_params = {
+static const struct iwl_base_params iwl2000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
@@ -206,7 +188,7 @@ static struct iwl_base_params iwl2000_base_params = {
};
-static struct iwl_base_params iwl2030_base_params = {
+static const struct iwl_base_params iwl2030_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
@@ -224,12 +206,12 @@ static struct iwl_base_params iwl2030_base_params = {
.hd_v2 = true,
};
-static struct iwl_ht_params iwl2000_ht_params = {
+static const struct iwl_ht_params iwl2000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
};
-static struct iwl_bt_params iwl2030_bt_params = {
+static const struct iwl_bt_params iwl2030_bt_params = {
/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
.advanced_bt_coexist = true,
.agg_time_limit = BT_AGG_THRESHOLD_DEF,
@@ -244,6 +226,8 @@ static struct iwl_bt_params iwl2030_bt_params = {
.ucode_api_max = IWL2000_UCODE_API_MAX, \
.ucode_api_ok = IWL2000_UCODE_API_OK, \
.ucode_api_min = IWL2000_UCODE_API_MIN, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.lib = &iwl2000_lib, \
@@ -253,13 +237,13 @@ static struct iwl_bt_params iwl2030_bt_params = {
.led_mode = IWL_LED_RF_STATE, \
.iq_invert = true \
-struct iwl_cfg iwl2000_2bgn_cfg = {
+const struct iwl_cfg iwl2000_2bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
IWL_DEVICE_2000,
.ht_params = &iwl2000_ht_params,
};
-struct iwl_cfg iwl2000_2bgn_d_cfg = {
+const struct iwl_cfg iwl2000_2bgn_d_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 2200D BGN",
IWL_DEVICE_2000,
.ht_params = &iwl2000_ht_params,
@@ -270,6 +254,8 @@ struct iwl_cfg iwl2000_2bgn_d_cfg = {
.ucode_api_max = IWL2030_UCODE_API_MAX, \
.ucode_api_ok = IWL2030_UCODE_API_OK, \
.ucode_api_min = IWL2030_UCODE_API_MIN, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.lib = &iwl2030_lib, \
@@ -281,7 +267,7 @@ struct iwl_cfg iwl2000_2bgn_d_cfg = {
.adv_pm = true, \
.iq_invert = true \
-struct iwl_cfg iwl2030_2bgn_cfg = {
+const struct iwl_cfg iwl2030_2bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
IWL_DEVICE_2030,
.ht_params = &iwl2000_ht_params,
@@ -292,6 +278,8 @@ struct iwl_cfg iwl2030_2bgn_cfg = {
.ucode_api_max = IWL105_UCODE_API_MAX, \
.ucode_api_ok = IWL105_UCODE_API_OK, \
.ucode_api_min = IWL105_UCODE_API_MIN, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.lib = &iwl2000_lib, \
@@ -303,13 +291,13 @@ struct iwl_cfg iwl2030_2bgn_cfg = {
.rx_with_siso_diversity = true, \
.iq_invert = true \
-struct iwl_cfg iwl105_bgn_cfg = {
+const struct iwl_cfg iwl105_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
IWL_DEVICE_105,
.ht_params = &iwl2000_ht_params,
};
-struct iwl_cfg iwl105_bgn_d_cfg = {
+const struct iwl_cfg iwl105_bgn_d_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 105D BGN",
IWL_DEVICE_105,
.ht_params = &iwl2000_ht_params,
@@ -320,6 +308,8 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
.ucode_api_max = IWL135_UCODE_API_MAX, \
.ucode_api_ok = IWL135_UCODE_API_OK, \
.ucode_api_min = IWL135_UCODE_API_MIN, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.lib = &iwl2030_lib, \
@@ -332,7 +322,7 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
.rx_with_siso_diversity = true, \
.iq_invert = true \
-struct iwl_cfg iwl135_bgn_cfg = {
+const struct iwl_cfg iwl135_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
IWL_DEVICE_135,
.ht_params = &iwl2000_ht_params,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index b3a365fea7bb..a805e97b89af 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -45,6 +45,7 @@
#include "iwl-trans.h"
#include "iwl-shared.h"
#include "iwl-cfg.h"
+#include "iwl-prph.h"
/* Highest firmware API version supported */
#define IWL5000_UCODE_API_MAX 5
@@ -63,27 +64,19 @@
/* NIC configuration for 5000 series */
static void iwl5000_nic_config(struct iwl_priv *priv)
{
- unsigned long flags;
-
iwl_rf_config(priv);
- spin_lock_irqsave(&priv->shrd->lock, flags);
-
/* W/A : NIC is stuck in a reset state after Early PCIe power off
* (PCIe power is lost before PERST# is asserted),
* causing ME FW to lose ownership and not being able to obtain it back.
*/
- iwl_set_bits_mask_prph(bus(priv), APMG_PS_CTRL_REG,
+ iwl_set_bits_mask_prph(trans(priv), APMG_PS_CTRL_REG,
APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
-
-
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
}
-static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
+static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
.min_nrg_cck = 100,
- .max_nrg_cck = 0, /* not used, set to 0 */
.auto_corr_min_ofdm = 90,
.auto_corr_min_ofdm_mrc = 170,
.auto_corr_min_ofdm_x1 = 105,
@@ -108,7 +101,6 @@ static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
.min_nrg_cck = 95,
- .max_nrg_cck = 0, /* not used, set to 0 */
.auto_corr_min_ofdm = 90,
.auto_corr_min_ofdm_mrc = 170,
.auto_corr_min_ofdm_x1 = 105,
@@ -162,62 +154,36 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
}
-static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
+static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
{
- if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
- iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- cfg(priv)->base_params->num_of_queues =
- iwlagn_mod_params.num_of_queues;
-
- hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
-
- hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
- hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
-
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
- hw_params(priv).rx_chains_num = num_of_ant(cfg(priv)->valid_rx_ant);
- hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
- hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
+ hw_params(priv).tx_chains_num =
+ num_of_ant(hw_params(priv).valid_tx_ant);
+ hw_params(priv).rx_chains_num =
+ num_of_ant(hw_params(priv).valid_rx_ant);
iwl5000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
hw_params(priv).sens = &iwl5000_sensitivity;
-
- return 0;
}
-static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
+static void iwl5150_hw_set_hw_params(struct iwl_priv *priv)
{
- if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
- iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- cfg(priv)->base_params->num_of_queues =
- iwlagn_mod_params.num_of_queues;
-
- hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
-
- hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
- hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
-
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
- hw_params(priv).rx_chains_num = num_of_ant(cfg(priv)->valid_rx_ant);
- hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
- hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
+ hw_params(priv).tx_chains_num =
+ num_of_ant(hw_params(priv).valid_tx_ant);
+ hw_params(priv).rx_chains_num =
+ num_of_ant(hw_params(priv).valid_rx_ant);
iwl5150_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
hw_params(priv).sens = &iwl5150_sensitivity;
-
- return 0;
}
static void iwl5150_temperature(struct iwl_priv *priv)
@@ -300,7 +266,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
return -EFAULT;
}
- return iwl_trans_send_cmd(trans(priv), &hcmd);
+ return iwl_dvm_send_cmd(priv, &hcmd);
}
static struct iwl_lib_ops iwl5000_lib = {
@@ -339,7 +305,7 @@ static struct iwl_lib_ops iwl5150_lib = {
.temperature = iwl5150_temperature,
};
-static struct iwl_base_params iwl5000_base_params = {
+static const struct iwl_base_params iwl5000_base_params = {
.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
@@ -352,7 +318,8 @@ static struct iwl_base_params iwl5000_base_params = {
.no_idle_support = true,
.wd_disable = true,
};
-static struct iwl_ht_params iwl5000_ht_params = {
+
+static const struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
};
@@ -360,13 +327,15 @@ static struct iwl_ht_params iwl5000_ht_params = {
.fw_name_pre = IWL5000_FW_PRE, \
.ucode_api_max = IWL5000_UCODE_API_MAX, \
.ucode_api_min = IWL5000_UCODE_API_MIN, \
+ .max_inst_size = IWLAGN_RTC_INST_SIZE, \
+ .max_data_size = IWLAGN_RTC_DATA_SIZE, \
.eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
.lib = &iwl5000_lib, \
.base_params = &iwl5000_base_params, \
.led_mode = IWL_LED_BLINK
-struct iwl_cfg iwl5300_agn_cfg = {
+const struct iwl_cfg iwl5300_agn_cfg = {
.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
IWL_DEVICE_5000,
/* at least EEPROM 0x11A has wrong info */
@@ -375,7 +344,7 @@ struct iwl_cfg iwl5300_agn_cfg = {
.ht_params = &iwl5000_ht_params,
};
-struct iwl_cfg iwl5100_bgn_cfg = {
+const struct iwl_cfg iwl5100_bgn_cfg = {
.name = "Intel(R) WiFi Link 5100 BGN",
IWL_DEVICE_5000,
.valid_tx_ant = ANT_B, /* .cfg overwrite */
@@ -383,14 +352,14 @@ struct iwl_cfg iwl5100_bgn_cfg = {
.ht_params = &iwl5000_ht_params,
};
-struct iwl_cfg iwl5100_abg_cfg = {
+const struct iwl_cfg iwl5100_abg_cfg = {
.name = "Intel(R) WiFi Link 5100 ABG",
IWL_DEVICE_5000,
.valid_tx_ant = ANT_B, /* .cfg overwrite */
.valid_rx_ant = ANT_AB, /* .cfg overwrite */
};
-struct iwl_cfg iwl5100_agn_cfg = {
+const struct iwl_cfg iwl5100_agn_cfg = {
.name = "Intel(R) WiFi Link 5100 AGN",
IWL_DEVICE_5000,
.valid_tx_ant = ANT_B, /* .cfg overwrite */
@@ -398,11 +367,13 @@ struct iwl_cfg iwl5100_agn_cfg = {
.ht_params = &iwl5000_ht_params,
};
-struct iwl_cfg iwl5350_agn_cfg = {
+const struct iwl_cfg iwl5350_agn_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
+ .max_inst_size = IWLAGN_RTC_INST_SIZE,
+ .max_data_size = IWLAGN_RTC_DATA_SIZE,
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
.lib = &iwl5000_lib,
@@ -416,6 +387,8 @@ struct iwl_cfg iwl5350_agn_cfg = {
.fw_name_pre = IWL5150_FW_PRE, \
.ucode_api_max = IWL5150_UCODE_API_MAX, \
.ucode_api_min = IWL5150_UCODE_API_MIN, \
+ .max_inst_size = IWLAGN_RTC_INST_SIZE, \
+ .max_data_size = IWLAGN_RTC_DATA_SIZE, \
.eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
.lib = &iwl5150_lib, \
@@ -424,14 +397,14 @@ struct iwl_cfg iwl5350_agn_cfg = {
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
-struct iwl_cfg iwl5150_agn_cfg = {
+const struct iwl_cfg iwl5150_agn_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
IWL_DEVICE_5150,
.ht_params = &iwl5000_ht_params,
};
-struct iwl_cfg iwl5150_abg_cfg = {
+const struct iwl_cfg iwl5150_abg_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
IWL_DEVICE_5150,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 54b753399e6e..64060cd738b5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -82,7 +82,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
if (iwl_eeprom_calib_version(priv->shrd) >= 6)
- iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
+ iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
}
@@ -90,31 +90,31 @@ static void iwl6150_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
if (iwl_eeprom_calib_version(priv->shrd) >= 6)
- iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
+ iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
- iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
+ iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_6050_1x2);
}
+static void iwl6000i_additional_nic_config(struct iwl_priv *priv)
+{
+ /* 2x2 IPA phy type */
+ iwl_write32(trans(priv), CSR_GP_DRIVER_REG,
+ CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
+}
+
/* NIC configuration for 6000 series */
static void iwl6000_nic_config(struct iwl_priv *priv)
{
iwl_rf_config(priv);
- /* no locking required for register write */
- if (cfg(priv)->pa_type == IWL_PA_INTERNAL) {
- /* 2x2 IPA phy type */
- iwl_write32(bus(priv), CSR_GP_DRIVER_REG,
- CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
- }
/* do additional nic configuration if needed */
if (cfg(priv)->additional_nic_config)
- cfg(priv)->additional_nic_config(priv);
+ cfg(priv)->additional_nic_config(priv);
}
-static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
+static const struct iwl_sensitivity_ranges iwl6000_sensitivity = {
.min_nrg_cck = 110,
- .max_nrg_cck = 0, /* not used, set to 0 */
.auto_corr_min_ofdm = 80,
.auto_corr_min_ofdm_mrc = 128,
.auto_corr_min_ofdm_x1 = 105,
@@ -137,37 +137,24 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
.nrg_th_cca = 62,
};
-static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
+static void iwl6000_hw_set_hw_params(struct iwl_priv *priv)
{
- if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
- iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- cfg(priv)->base_params->num_of_queues =
- iwlagn_mod_params.num_of_queues;
-
- hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
-
- hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
- hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;
-
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ hw_params(priv).tx_chains_num =
+ num_of_ant(hw_params(priv).valid_tx_ant);
if (cfg(priv)->rx_with_siso_diversity)
hw_params(priv).rx_chains_num = 1;
else
hw_params(priv).rx_chains_num =
- num_of_ant(cfg(priv)->valid_rx_ant);
- hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
- hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
+ num_of_ant(hw_params(priv).valid_rx_ant);
iwl6000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
hw_params(priv).sens = &iwl6000_sensitivity;
- return 0;
}
static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
@@ -238,7 +225,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
return -EFAULT;
}
- return iwl_trans_send_cmd(trans(priv), &hcmd);
+ return iwl_dvm_send_cmd(priv, &hcmd);
}
static struct iwl_lib_ops iwl6000_lib = {
@@ -255,16 +242,13 @@ static struct iwl_lib_ops iwl6000_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
- .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
+ .enhanced_txpower = true,
},
.temperature = iwlagn_temperature,
};
static struct iwl_lib_ops iwl6030_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
- .bt_rx_handler_setup = iwlagn_bt_rx_handler_setup,
- .bt_setup_deferred_work = iwlagn_bt_setup_deferred_work,
- .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
.set_channel_switch = iwl6000_hw_channel_switch,
.nic_config = iwl6000_nic_config,
.eeprom_ops = {
@@ -277,12 +261,12 @@ static struct iwl_lib_ops iwl6030_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
- .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
+ .enhanced_txpower = true,
},
.temperature = iwlagn_temperature,
};
-static struct iwl_base_params iwl6000_base_params = {
+static const struct iwl_base_params iwl6000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
@@ -299,7 +283,7 @@ static struct iwl_base_params iwl6000_base_params = {
.shadow_reg_enable = true,
};
-static struct iwl_base_params iwl6050_base_params = {
+static const struct iwl_base_params iwl6050_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
@@ -315,7 +299,8 @@ static struct iwl_base_params iwl6050_base_params = {
.max_event_log_size = 1024,
.shadow_reg_enable = true,
};
-static struct iwl_base_params iwl6000_g2_base_params = {
+
+static const struct iwl_base_params iwl6000_g2_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
@@ -332,12 +317,12 @@ static struct iwl_base_params iwl6000_g2_base_params = {
.shadow_reg_enable = true,
};
-static struct iwl_ht_params iwl6000_ht_params = {
+static const struct iwl_ht_params iwl6000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
};
-static struct iwl_bt_params iwl6000_bt_params = {
+static const struct iwl_bt_params iwl6000_bt_params = {
/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
.advanced_bt_coexist = true,
.agg_time_limit = BT_AGG_THRESHOLD_DEF,
@@ -351,6 +336,8 @@ static struct iwl_bt_params iwl6000_bt_params = {
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
.ucode_api_ok = IWL6000G2_UCODE_API_OK, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
.eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
.lib = &iwl6000_lib, \
@@ -358,39 +345,53 @@ static struct iwl_bt_params iwl6000_bt_params = {
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE
-struct iwl_cfg iwl6005_2agn_cfg = {
+const struct iwl_cfg iwl6005_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
IWL_DEVICE_6005,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6005_2abg_cfg = {
+const struct iwl_cfg iwl6005_2abg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205 ABG",
IWL_DEVICE_6005,
};
-struct iwl_cfg iwl6005_2bg_cfg = {
+const struct iwl_cfg iwl6005_2bg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205 BG",
IWL_DEVICE_6005,
};
-struct iwl_cfg iwl6005_2agn_sff_cfg = {
+const struct iwl_cfg iwl6005_2agn_sff_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
IWL_DEVICE_6005,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6005_2agn_d_cfg = {
+const struct iwl_cfg iwl6005_2agn_d_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205D AGN",
IWL_DEVICE_6005,
.ht_params = &iwl6000_ht_params,
};
+const struct iwl_cfg iwl6005_2agn_mow1_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6206 AGN",
+ IWL_DEVICE_6005,
+ .ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6207 AGN",
+ IWL_DEVICE_6005,
+ .ht_params = &iwl6000_ht_params,
+};
+
#define IWL_DEVICE_6030 \
.fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
.ucode_api_ok = IWL6000G2_UCODE_API_OK, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
.eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.lib = &iwl6030_lib, \
@@ -400,53 +401,53 @@ struct iwl_cfg iwl6005_2agn_d_cfg = {
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true \
-struct iwl_cfg iwl6030_2agn_cfg = {
+const struct iwl_cfg iwl6030_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6030_2abg_cfg = {
+const struct iwl_cfg iwl6030_2abg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 ABG",
IWL_DEVICE_6030,
};
-struct iwl_cfg iwl6030_2bgn_cfg = {
+const struct iwl_cfg iwl6030_2bgn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 BGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6030_2bg_cfg = {
+const struct iwl_cfg iwl6030_2bg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 BG",
IWL_DEVICE_6030,
};
-struct iwl_cfg iwl6035_2agn_cfg = {
+const struct iwl_cfg iwl6035_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl1030_bgn_cfg = {
+const struct iwl_cfg iwl1030_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl1030_bg_cfg = {
+const struct iwl_cfg iwl1030_bg_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1030 BG",
IWL_DEVICE_6030,
};
-struct iwl_cfg iwl130_bgn_cfg = {
+const struct iwl_cfg iwl130_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 130 BGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
.rx_with_siso_diversity = true,
};
-struct iwl_cfg iwl130_bg_cfg = {
+const struct iwl_cfg iwl130_bg_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 130 BG",
IWL_DEVICE_6030,
.rx_with_siso_diversity = true,
@@ -460,27 +461,29 @@ struct iwl_cfg iwl130_bg_cfg = {
.ucode_api_max = IWL6000_UCODE_API_MAX, \
.ucode_api_ok = IWL6000_UCODE_API_OK, \
.ucode_api_min = IWL6000_UCODE_API_MIN, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
.valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
.valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
.eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
.lib = &iwl6000_lib, \
+ .additional_nic_config = iwl6000i_additional_nic_config,\
.base_params = &iwl6000_base_params, \
- .pa_type = IWL_PA_INTERNAL, \
.led_mode = IWL_LED_BLINK
-struct iwl_cfg iwl6000i_2agn_cfg = {
+const struct iwl_cfg iwl6000i_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
IWL_DEVICE_6000i,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6000i_2abg_cfg = {
+const struct iwl_cfg iwl6000i_2abg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
IWL_DEVICE_6000i,
};
-struct iwl_cfg iwl6000i_2bg_cfg = {
+const struct iwl_cfg iwl6000i_2bg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
IWL_DEVICE_6000i,
};
@@ -489,6 +492,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.fw_name_pre = IWL6050_FW_PRE, \
.ucode_api_max = IWL6050_UCODE_API_MAX, \
.ucode_api_min = IWL6050_UCODE_API_MIN, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
.valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
.valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
.lib = &iwl6000_lib, \
@@ -499,13 +504,13 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
-struct iwl_cfg iwl6050_2agn_cfg = {
+const struct iwl_cfg iwl6050_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
IWL_DEVICE_6050,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6050_2abg_cfg = {
+const struct iwl_cfg iwl6050_2abg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
IWL_DEVICE_6050,
};
@@ -514,6 +519,8 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.fw_name_pre = IWL6050_FW_PRE, \
.ucode_api_max = IWL6050_UCODE_API_MAX, \
.ucode_api_min = IWL6050_UCODE_API_MIN, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
.lib = &iwl6000_lib, \
.additional_nic_config = iwl6150_additional_nic_config, \
.eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
@@ -522,23 +529,25 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
-struct iwl_cfg iwl6150_bgn_cfg = {
+const struct iwl_cfg iwl6150_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
IWL_DEVICE_6150,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6150_bg_cfg = {
+const struct iwl_cfg iwl6150_bg_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
IWL_DEVICE_6150,
};
-struct iwl_cfg iwl6000_3agn_cfg = {
+const struct iwl_cfg iwl6000_3agn_cfg = {
.name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_ok = IWL6000_UCODE_API_OK,
.ucode_api_min = IWL6000_UCODE_API_MIN,
+ .max_inst_size = IWL60_RTC_INST_SIZE,
+ .max_data_size = IWL60_RTC_DATA_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
.lib = &iwl6000_lib,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 50ff849c9f67..84cbe7bb504c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -73,6 +73,14 @@
* INIT calibrations framework
*****************************************************************************/
+/* Opaque calibration results */
+struct iwl_calib_result {
+ struct list_head list;
+ size_t cmd_len;
+ struct iwl_calib_hdr hdr;
+ /* data follows */
+};
+
struct statistics_general_data {
u32 beacon_silence_rssi_a;
u32 beacon_silence_rssi_b;
@@ -82,7 +90,7 @@ struct statistics_general_data {
u32 beacon_energy_c;
};
-int iwl_send_calib_results(struct iwl_trans *trans)
+int iwl_send_calib_results(struct iwl_priv *priv)
{
struct iwl_host_cmd hcmd = {
.id = REPLY_PHY_CALIBRATION_CMD,
@@ -90,15 +98,15 @@ int iwl_send_calib_results(struct iwl_trans *trans)
};
struct iwl_calib_result *res;
- list_for_each_entry(res, &trans->calib_results, list) {
+ list_for_each_entry(res, &priv->calib_results, list) {
int ret;
hcmd.len[0] = res->cmd_len;
hcmd.data[0] = &res->hdr;
hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
- ret = iwl_trans_send_cmd(trans, &hcmd);
+ ret = iwl_dvm_send_cmd(priv, &hcmd);
if (ret) {
- IWL_ERR(trans, "Error %d on calib cmd %d\n",
+ IWL_ERR(priv, "Error %d on calib cmd %d\n",
ret, res->hdr.op_code);
return ret;
}
@@ -107,7 +115,7 @@ int iwl_send_calib_results(struct iwl_trans *trans)
return 0;
}
-int iwl_calib_set(struct iwl_trans *trans,
+int iwl_calib_set(struct iwl_priv *priv,
const struct iwl_calib_hdr *cmd, int len)
{
struct iwl_calib_result *res, *tmp;
@@ -119,7 +127,7 @@ int iwl_calib_set(struct iwl_trans *trans,
memcpy(&res->hdr, cmd, len);
res->cmd_len = len;
- list_for_each_entry(tmp, &trans->calib_results, list) {
+ list_for_each_entry(tmp, &priv->calib_results, list) {
if (tmp->hdr.op_code == res->hdr.op_code) {
list_replace(&tmp->list, &res->list);
kfree(tmp);
@@ -128,16 +136,16 @@ int iwl_calib_set(struct iwl_trans *trans,
}
/* wasn't in list already */
- list_add_tail(&res->list, &trans->calib_results);
+ list_add_tail(&res->list, &priv->calib_results);
return 0;
}
-void iwl_calib_free_results(struct iwl_trans *trans)
+void iwl_calib_free_results(struct iwl_priv *priv)
{
struct iwl_calib_result *res, *tmp;
- list_for_each_entry_safe(res, tmp, &trans->calib_results, list) {
+ list_for_each_entry_safe(res, tmp, &priv->calib_results, list) {
list_del(&res->list);
kfree(res);
}
@@ -492,7 +500,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
sizeof(u16)*HD_TABLE_SIZE);
- return iwl_trans_send_cmd(trans(priv), &cmd_out);
+ return iwl_dvm_send_cmd(priv, &cmd_out);
}
/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
@@ -581,7 +589,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
&(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]),
sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES);
- return iwl_trans_send_cmd(trans(priv), &cmd_out);
+ return iwl_dvm_send_cmd(priv, &cmd_out);
}
void iwl_init_sensitivity(struct iwl_priv *priv)
@@ -634,7 +642,7 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
data->last_bad_plcp_cnt_cck = 0;
data->last_fa_cnt_cck = 0;
- if (priv->enhance_sensitivity_table)
+ if (priv->fw->enhance_sensitivity_table)
ret |= iwl_enhance_sensitivity_write(priv);
else
ret |= iwl_sensitivity_write(priv);
@@ -653,7 +661,6 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv)
struct iwl_sensitivity_data *data = NULL;
struct statistics_rx_non_phy *rx_info;
struct statistics_rx_phy *ofdm, *cck;
- unsigned long flags;
struct statistics_general_data statis;
if (priv->disable_sens_cal)
@@ -666,13 +673,13 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv)
return;
}
- spin_lock_irqsave(&priv->shrd->lock, flags);
+ spin_lock_bh(&priv->statistics.lock);
rx_info = &priv->statistics.rx_non_phy;
ofdm = &priv->statistics.rx_ofdm;
cck = &priv->statistics.rx_cck;
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ spin_unlock_bh(&priv->statistics.lock);
return;
}
@@ -696,7 +703,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv)
statis.beacon_energy_c =
le32_to_cpu(rx_info->beacon_energy_c);
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ spin_unlock_bh(&priv->statistics.lock);
IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
@@ -745,7 +752,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv)
iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
- if (priv->enhance_sensitivity_table)
+ if (priv->fw->enhance_sensitivity_table)
iwl_enhance_sensitivity_write(priv);
else
iwl_sensitivity_write(priv);
@@ -847,7 +854,7 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
* connect the first valid tx chain
*/
first_chain =
- find_first_chain(cfg(priv)->valid_tx_ant);
+ find_first_chain(hw_params(priv).valid_tx_ant);
data->disconn_array[first_chain] = 0;
active_chains |= BIT(first_chain);
IWL_DEBUG_CALIB(priv,
@@ -872,10 +879,8 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
}
static void iwlagn_gain_computation(struct iwl_priv *priv,
- u32 average_noise[NUM_RX_CHAINS],
- u16 min_average_noise_antenna_i,
- u32 min_average_noise,
- u8 default_chain)
+ u32 average_noise[NUM_RX_CHAINS],
+ u8 default_chain)
{
int i;
s32 delta_g;
@@ -923,7 +928,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
priv->phy_calib_chain_noise_gain_cmd);
cmd.delta_gain_1 = data->delta_gain_code[1];
cmd.delta_gain_2 = data->delta_gain_code[2];
- iwl_trans_send_cmd_pdu(trans(priv), REPLY_PHY_CALIBRATION_CMD,
+ iwl_dvm_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
CMD_ASYNC, sizeof(cmd), &cmd);
data->radio_write = 1;
@@ -956,7 +961,6 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
u16 stat_chnum = INITIALIZATION_VALUE;
u8 rxon_band24;
u8 stat_band24;
- unsigned long flags;
struct statistics_rx_non_phy *rx_info;
/*
@@ -981,13 +985,13 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
return;
}
- spin_lock_irqsave(&priv->shrd->lock, flags);
+ spin_lock_bh(&priv->statistics.lock);
rx_info = &priv->statistics.rx_non_phy;
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ spin_unlock_bh(&priv->statistics.lock);
return;
}
@@ -1002,7 +1006,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
rxon_chnum, rxon_band24);
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ spin_unlock_bh(&priv->statistics.lock);
return;
}
@@ -1021,7 +1025,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ spin_unlock_bh(&priv->statistics.lock);
data->beacon_count++;
@@ -1081,8 +1085,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
min_average_noise, min_average_noise_antenna_i);
iwlagn_gain_computation(priv, average_noise,
- min_average_noise_antenna_i, min_average_noise,
- find_first_chain(cfg(priv)->valid_rx_ant));
+ find_first_chain(hw_params(priv).valid_rx_ant));
/* Some power changes may have been made during the calibration.
* Update and commit the RXON
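
The iwl-agn-calib.c hunks above move the calibration-result list from the transport struct into struct iwl_priv and make struct iwl_calib_result a file-local, opaque type; iwl_calib_set() keeps at most one result per op_code, replacing any older entry in place, and iwl_calib_free_results() drains the list. A compact user-space sketch of that replace-or-append list follows, using a plain singly linked list instead of the kernel's list_head; all names and sizes are invented for the sketch.

/* Sketch only: one stored result per op_code, newest wins. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct calib_result {
	struct calib_result *next;
	unsigned char op_code;
	size_t len;
	unsigned char data[64];		/* payload copied from the "firmware" */
};

static struct calib_result *calib_head;

static int calib_set(unsigned char op_code, const void *buf, size_t len)
{
	struct calib_result **pp, *res;

	if (len > sizeof(res->data))
		return -1;

	res = malloc(sizeof(*res));
	if (!res)
		return -1;
	res->op_code = op_code;
	res->len = len;
	memcpy(res->data, buf, len);

	/* Replace an existing result carrying the same op_code, if any. */
	for (pp = &calib_head; *pp; pp = &(*pp)->next) {
		if ((*pp)->op_code == op_code) {
			res->next = (*pp)->next;
			free(*pp);
			*pp = res;
			return 0;
		}
	}

	/* Not in the list yet: link it in at the head. */
	res->next = calib_head;
	calib_head = res;
	return 0;
}

static void calib_free_all(void)
{
	while (calib_head) {
		struct calib_result *res = calib_head;

		calib_head = res->next;
		free(res);
	}
}

int main(void)
{
	calib_set(0x12, "old", 3);
	calib_set(0x12, "new", 3);	/* replaces the first entry */
	printf("op 0x%02x -> %.3s\n", calib_head->op_code,
	       (const char *)calib_head->data);
	calib_free_all();
	return 0;
}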
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
index 10275ce92bde..9ed6683314a7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index 123ef5e129d5..d0ec0abd3c89 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index ca78e91de86c..56f41c9409d1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,6 @@
#include <linux/init.h>
#include <linux/sched.h>
-#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
@@ -52,7 +51,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
u8 tx_ant_cfg_cmd;
- if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->shrd->status),
+ if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
"TX Power requested while scanning!\n"))
return -EAGAIN;
@@ -77,17 +76,19 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
- if (IWL_UCODE_API(priv->ucode_ver) == 1)
+ if (IWL_UCODE_API(priv->fw->ucode_ver) == 1)
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
else
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
- return iwl_trans_send_cmd_pdu(trans(priv), tx_ant_cfg_cmd, CMD_SYNC,
+ return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, CMD_SYNC,
sizeof(tx_power_cmd), &tx_power_cmd);
}
void iwlagn_temperature(struct iwl_priv *priv)
{
+ lockdep_assert_held(&priv->statistics.lock);
+
/* store temperature from correct statistics (in Celsius) */
priv->temperature = le32_to_cpu(priv->statistics.common.temperature);
iwl_tt_handler(priv);
@@ -233,19 +234,19 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
IWL_PAN_SCD_MULTICAST_MSK;
- if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
flush_cmd.fifo_control);
flush_cmd.flush_control = cpu_to_le16(flush_control);
- return iwl_trans_send_cmd(trans(priv), &cmd);
+ return iwl_dvm_send_cmd(priv, &cmd);
}
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
ieee80211_stop_queues(priv->hw);
if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
IWL_ERR(priv, "flush request fail\n");
@@ -255,7 +256,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
iwl_trans_wait_tx_queue_empty(trans(priv));
done:
ieee80211_wake_queues(priv->hw);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
/*
@@ -434,12 +435,12 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
if (cfg(priv)->bt_params->bt_session_2) {
memcpy(&bt_cmd_2000.basic, &basic,
sizeof(basic));
- ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000);
} else {
memcpy(&bt_cmd_6000.basic, &basic,
sizeof(basic));
- ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000);
}
if (ret)
@@ -452,7 +453,7 @@ void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena)
struct iwl_rxon_context *ctx, *found_ctx = NULL;
bool found_ap = false;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
/* Check whether AP or GO mode is active. */
if (rssi_ena) {
@@ -565,7 +566,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
break;
}
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
/*
* We can not send command to firmware while scanning. When the scan
@@ -574,7 +575,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
* STATUS_SCANNING to avoid race when queue_work two times from
* different notifications, but quit and not perform any work at all.
*/
- if (test_bit(STATUS_SCAN_HW, &priv->shrd->status))
+ if (test_bit(STATUS_SCAN_HW, &priv->status))
goto out;
iwl_update_chain_flags(priv);
@@ -593,7 +594,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
*/
iwlagn_bt_coex_rssi_monitor(priv);
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
/*
@@ -700,17 +701,16 @@ static void iwlagn_set_kill_msk(struct iwl_priv *priv,
priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
/* schedule to send runtime bt_config */
- queue_work(priv->shrd->workqueue, &priv->bt_runtime_config);
+ queue_work(priv->workqueue, &priv->bt_runtime_config);
}
}
int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
- unsigned long flags;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
+ struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data;
struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
@@ -745,7 +745,7 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
IWL_BT_COEX_TRAFFIC_LOAD_NONE;
}
priv->bt_status = coex->bt_status;
- queue_work(priv->shrd->workqueue,
+ queue_work(priv->workqueue,
&priv->bt_traffic_change_work);
}
}
@@ -754,9 +754,7 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
/* FIXME: based on notification, adjust the prio_boost */
- spin_lock_irqsave(&priv->shrd->lock, flags);
priv->bt_ci_compliance = coex->bt_ci_compliance;
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
return 0;
}
@@ -959,7 +957,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key,
void *_data)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct wowlan_key_data *data = _data;
struct iwl_rxon_context *ctx = data->ctx;
struct aes_sc *aes_sc, *aes_tx_sc = NULL;
@@ -971,7 +969,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
u16 p1k[IWLAGN_P1K_SIZE];
int ret, i;
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
@@ -1077,7 +1075,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
break;
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
int iwlagn_send_patterns(struct iwl_priv *priv,
@@ -1117,13 +1115,12 @@ int iwlagn_send_patterns(struct iwl_priv *priv,
}
cmd.data[0] = pattern_cmd;
- err = iwl_trans_send_cmd(trans(priv), &cmd);
+ err = iwl_dvm_send_cmd(priv, &cmd);
kfree(pattern_cmd);
return err;
}
-int iwlagn_suspend(struct iwl_priv *priv,
- struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
{
struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
struct iwl_rxon_cmd rxon;
@@ -1192,11 +1189,12 @@ int iwlagn_suspend(struct iwl_priv *priv,
memcpy(&rxon, &ctx->active, sizeof(rxon));
+ priv->ucode_loaded = false;
iwl_trans_stop_device(trans(priv));
- priv->shrd->wowlan = true;
+ priv->wowlan = true;
- ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_WOWLAN);
+ ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
if (ret)
goto out;
@@ -1224,11 +1222,11 @@ int iwlagn_suspend(struct iwl_priv *priv,
* constraints. Since we're in the suspend path
* that isn't really a problem though.
*/
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
ieee80211_iter_keys(priv->hw, ctx->vif,
iwlagn_wowlan_program_keys,
&key_data);
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if (key_data.error) {
ret = -EIO;
goto out;
@@ -1243,13 +1241,13 @@ int iwlagn_suspend(struct iwl_priv *priv,
.len[0] = sizeof(*key_data.rsc_tsc),
};
- ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
+ ret = iwl_dvm_send_cmd(priv, &rsc_tsc_cmd);
if (ret)
goto out;
}
if (key_data.use_tkip) {
- ret = iwl_trans_send_cmd_pdu(trans(priv),
+ ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_WOWLAN_TKIP_PARAMS,
CMD_SYNC, sizeof(tkip_cmd),
&tkip_cmd);
@@ -1265,7 +1263,7 @@ int iwlagn_suspend(struct iwl_priv *priv,
kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
kek_kck_cmd.replay_ctr = priv->replay_ctr;
- ret = iwl_trans_send_cmd_pdu(trans(priv),
+ ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_WOWLAN_KEK_KCK_MATERIAL,
CMD_SYNC, sizeof(kek_kck_cmd),
&kek_kck_cmd);
@@ -1274,12 +1272,12 @@ int iwlagn_suspend(struct iwl_priv *priv,
}
}
- ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_D3_CONFIG, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, CMD_SYNC,
sizeof(d3_cfg_cmd), &d3_cfg_cmd);
if (ret)
goto out;
- ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_WAKEUP_FILTER,
CMD_SYNC, sizeof(wakeup_filter_cmd),
&wakeup_filter_cmd);
if (ret)
@@ -1291,3 +1289,41 @@ int iwlagn_suspend(struct iwl_priv *priv,
return ret;
}
#endif
+
+int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+ if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
+ IWL_WARN(priv, "Not sending command - %s KILL\n",
+ iwl_is_rfkill(priv) ? "RF" : "CT");
+ return -EIO;
+ }
+
+ /*
+ * Synchronous commands from this op-mode must hold
+ * the mutex, this ensures we don't try to send two
+ * (or more) synchronous commands at a time.
+ */
+ if (cmd->flags & CMD_SYNC)
+ lockdep_assert_held(&priv->mutex);
+
+ if (priv->ucode_owner == IWL_OWNERSHIP_TM &&
+ !(cmd->flags & CMD_ON_DEMAND)) {
+ IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n");
+ return -EIO;
+ }
+
+ return iwl_trans_send_cmd(trans(priv), cmd);
+}
+
+int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
+ u32 flags, u16 len, const void *data)
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ .flags = flags,
+ };
+
+ return iwl_dvm_send_cmd(priv, &cmd);
+}
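
The new iwl_dvm_send_cmd() and iwl_dvm_send_cmd_pdu() helpers appended to iwl-agn-lib.c give the DVM op-mode a single choke point for host commands: bail out under RF/CT kill (or when testmode owns the uCode), assert the mutex for synchronous commands, and only then hand the command to the transport. A user-space analogue of that shape is sketched below; every name, id and flag in it is invented and does not come from the driver.

/* Sketch only: a thin PDU wrapper funnels everything through one
 * gatekeeper that refuses to send while the radio is "killed". */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct host_cmd {
	unsigned char id;
	unsigned short len;
	const void *data;
	unsigned int flags;
};

static bool rfkill_active;		/* stand-in for iwl_is_rfkill() */

static int transport_send(const struct host_cmd *cmd)
{
	printf("sending cmd %#x, %u bytes\n", cmd->id, (unsigned)cmd->len);
	return 0;
}

static int dvm_send_cmd(const struct host_cmd *cmd)
{
	if (rfkill_active) {
		fprintf(stderr, "not sending cmd %#x: RF kill\n", cmd->id);
		return -EIO;
	}
	return transport_send(cmd);
}

static int dvm_send_cmd_pdu(unsigned char id, unsigned int flags,
			    unsigned short len, const void *data)
{
	struct host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
		.flags = flags,
	};

	return dvm_send_cmd(&cmd);
}

int main(void)
{
	unsigned char payload[4] = { 1, 2, 3, 4 };

	return dvm_send_cmd_pdu(0x42, 0, sizeof(payload), payload);
}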
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 334b5ae8fdd4..53f8c51cfcdb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -38,6 +38,7 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-agn.h"
+#include "iwl-op-mode.h"
#define RS_NAME "iwl-agn-rs"
@@ -869,19 +870,16 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
{
struct iwl_scale_tbl_info *tbl;
bool full_concurrent = priv->bt_full_concurrent;
- unsigned long flags;
if (priv->bt_ant_couple_ok) {
/*
* Is there a need to switch between
* full concurrency and 3-wire?
*/
- spin_lock_irqsave(&priv->shrd->lock, flags);
if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
full_concurrent = true;
else
full_concurrent = false;
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
}
if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
(priv->bt_full_concurrent != full_concurrent)) {
@@ -892,7 +890,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
- queue_work(priv->shrd->workqueue, &priv->bt_full_concurrency);
+ queue_work(priv->workqueue, &priv->bt_full_concurrency);
}
}
@@ -909,7 +907,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
struct iwl_lq_sta *lq_sta = priv_sta;
struct iwl_link_quality_cmd *table;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+ struct iwl_op_mode *op_mode = (struct iwl_op_mode *)priv_r;
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
enum mac80211_rate_control_flags mac_flags;
u32 tx_rate;
@@ -2678,7 +2677,6 @@ out:
* which requires station table entry to exist).
*/
static void rs_initialize_lq(struct iwl_priv *priv,
- struct ieee80211_conf *conf,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta)
{
@@ -2737,7 +2735,9 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
struct sk_buff *skb = txrc->skb;
struct ieee80211_supported_band *sband = txrc->sband;
- struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
+ struct iwl_op_mode *op_mode __maybe_unused =
+ (struct iwl_op_mode *)priv_r;
+ struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_lq_sta *lq_sta = priv_sta;
int rate_idx;
@@ -2805,9 +2805,10 @@ static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
gfp_t gfp)
{
struct iwl_station_priv *sta_priv = (struct iwl_station_priv *) sta->drv_priv;
- struct iwl_priv *priv;
+ struct iwl_op_mode *op_mode __maybe_unused =
+ (struct iwl_op_mode *)priv_rate;
+ struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode);
- priv = (struct iwl_priv *)priv_rate;
IWL_DEBUG_RATE(priv, "create station rate scale window\n");
return &sta_priv->lq_sta;
@@ -2910,7 +2911,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
lq_sta->dbg_fixed_rate = 0;
#endif
- rs_initialize_lq(priv, conf, sta, lq_sta);
+ rs_initialize_lq(priv, sta, lq_sta);
}
static void rs_fill_link_cmd(struct iwl_priv *priv,
@@ -3074,7 +3075,8 @@ static void rs_free(void *priv_rate)
static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
void *priv_sta)
{
- struct iwl_priv *priv __maybe_unused = priv_r;
+ struct iwl_op_mode *op_mode __maybe_unused = priv_r;
+ struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode);
IWL_DEBUG_RATE(priv, "enter\n");
IWL_DEBUG_RATE(priv, "leave\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 6675b3c816d9..203b1c13c491 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index b22b2976f899..44c6f712b77d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -131,26 +131,27 @@ const char *get_cmd_string(u8 cmd)
******************************************************************************/
static int iwlagn_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_error_resp *err_resp = (void *)pkt->data;
IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
"seq 0x%04X ser 0x%08X\n",
- le32_to_cpu(pkt->u.err_resp.error_type),
- get_cmd_string(pkt->u.err_resp.cmd_id),
- pkt->u.err_resp.cmd_id,
- le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
- le32_to_cpu(pkt->u.err_resp.error_info));
+ le32_to_cpu(err_resp->error_type),
+ get_cmd_string(err_resp->cmd_id),
+ err_resp->cmd_id,
+ le16_to_cpu(err_resp->bad_cmd_seq_num),
+ le32_to_cpu(err_resp->error_info));
return 0;
}
-static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
+ struct iwl_csa_notification *csa = (void *)pkt->data;
/*
* MULTI-FIXME
* See iwlagn_mac_channel_switch.
@@ -158,7 +159,7 @@ static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
- if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
+ if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
return 0;
if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
@@ -177,11 +178,11 @@ static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
+ struct iwl_spectrum_notification *report = (void *)pkt->data;
if (!report->state) {
IWL_DEBUG_11H(priv,
@@ -195,12 +196,12 @@ static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
}
static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
+ struct iwl_sleep_notification *sleep = (void *)pkt->data;
IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
@@ -208,7 +209,7 @@ static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
}
static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -217,16 +218,16 @@ static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
"notification for %s:\n", len,
get_cmd_string(pkt->hdr.cmd));
- iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
+ iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
return 0;
}
static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
+ struct iwlagn_beacon_notif *beacon = (void *)pkt->data;
#ifdef CONFIG_IWLWIFI_DEBUG
u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
@@ -266,6 +267,8 @@ static bool iwlagn_good_ack_health(struct iwl_priv *priv,
if (priv->agg_tids_count)
return true;
+ lockdep_assert_held(&priv->statistics.lock);
+
old = &priv->statistics.tx;
actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
@@ -318,7 +321,7 @@ static bool iwlagn_good_plcp_health(struct iwl_priv *priv,
unsigned int msecs)
{
int delta;
- int threshold = cfg(priv)->base_params->plcp_delta_threshold;
+ int threshold = priv->plcp_delta_threshold;
if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
@@ -352,7 +355,7 @@ static void iwlagn_recover_from_statistics(struct iwl_priv *priv,
{
unsigned int msecs;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);
@@ -487,7 +490,7 @@ iwlagn_accumulative_statistics(struct iwl_priv *priv,
#endif
static int iwlagn_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
unsigned long stamp = jiffies;
@@ -509,9 +512,11 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n",
len);
+ spin_lock(&priv->statistics.lock);
+
if (len == sizeof(struct iwl_bt_notif_statistics)) {
struct iwl_bt_notif_statistics *stats;
- stats = &pkt->u.stats_bt;
+ stats = (void *)&pkt->data;
flag = &stats->flag;
common = &stats->general.common;
rx_non_phy = &stats->rx.general.common;
@@ -529,7 +534,7 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
#endif
} else if (len == sizeof(struct iwl_notif_statistics)) {
struct iwl_notif_statistics *stats;
- stats = &pkt->u.stats;
+ stats = (void *)&pkt->data;
flag = &stats->flag;
common = &stats->general.common;
rx_non_phy = &stats->rx.general;
@@ -542,6 +547,7 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n",
len, sizeof(struct iwl_bt_notif_statistics),
sizeof(struct iwl_notif_statistics));
+ spin_unlock(&priv->statistics.lock);
return 0;
}
@@ -569,7 +575,7 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
priv->rx_statistics_jiffies = stamp;
- set_bit(STATUS_STATISTICS, &priv->shrd->status);
+ set_bit(STATUS_STATISTICS, &priv->status);
/* Reschedule the statistics timer to occur in
* reg_recalib_period seconds to ensure we get a
@@ -578,23 +584,27 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
mod_timer(&priv->statistics_periodic, jiffies +
msecs_to_jiffies(reg_recalib_period * 1000));
- if (unlikely(!test_bit(STATUS_SCANNING, &priv->shrd->status)) &&
+ if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
(pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
iwlagn_rx_calc_noise(priv);
- queue_work(priv->shrd->workqueue, &priv->run_time_calib_work);
+ queue_work(priv->workqueue, &priv->run_time_calib_work);
}
if (cfg(priv)->lib->temperature && change)
cfg(priv)->lib->temperature(priv);
+
+ spin_unlock(&priv->statistics.lock);
+
return 0;
}
static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_notif_statistics *stats = (void *)pkt->data;
- if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
+ if (le32_to_cpu(stats->flag) & UCODE_STATISTICS_CLEAR_MSK) {
#ifdef CONFIG_IWLWIFI_DEBUGFS
memset(&priv->accum_stats, 0,
sizeof(priv->accum_stats));
@@ -612,12 +622,13 @@ static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
/* Handle notification from uCode that card's power state is changing
* due to software, hardware, or critical temperature RFKILL */
static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
- unsigned long status = priv->shrd->status;
+ struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
+ u32 flags = le32_to_cpu(card_state_notif->flags);
+ unsigned long status = priv->status;
IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
(flags & HW_CARD_DISABLED) ? "Kill" : "On",
@@ -628,16 +639,16 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
CT_CARD_DISABLED)) {
- iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
+ iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
- iwl_write_direct32(bus(priv), HBUS_TARG_MBX_C,
+ iwl_write_direct32(trans(priv), HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
if (!(flags & RXON_CARD_DISABLED)) {
- iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
+ iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
- iwl_write_direct32(bus(priv), HBUS_TARG_MBX_C,
+ iwl_write_direct32(trans(priv), HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
}
if (flags & CT_CARD_DISABLED)
@@ -647,32 +658,31 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
iwl_tt_exit_ct_kill(priv);
if (flags & HW_CARD_DISABLED)
- set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
else
- clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
if (!(flags & RXON_CARD_DISABLED))
iwl_scan_cancel(priv);
if ((test_bit(STATUS_RF_KILL_HW, &status) !=
- test_bit(STATUS_RF_KILL_HW, &priv->shrd->status)))
+ test_bit(STATUS_RF_KILL_HW, &priv->status)))
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
+ test_bit(STATUS_RF_KILL_HW, &priv->status));
else
- wake_up(&priv->shrd->wait_command_queue);
+ wake_up(&trans(priv)->wait_command_queue);
return 0;
}
static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_missed_beacon_notif *missed_beacon;
+ struct iwl_missed_beacon_notif *missed_beacon = (void *)pkt->data;
- missed_beacon = &pkt->u.missed_beacon;
if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
priv->missed_beacon_threshold) {
IWL_DEBUG_CALIB(priv,
@@ -681,7 +691,7 @@ static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
le32_to_cpu(missed_beacon->total_missed_becons),
le32_to_cpu(missed_beacon->num_recvd_beacons),
le32_to_cpu(missed_beacon->num_expected_beacons));
- if (!test_bit(STATUS_SCANNING, &priv->shrd->status))
+ if (!test_bit(STATUS_SCANNING, &priv->status))
iwl_init_sensitivity(priv);
}
return 0;
@@ -690,13 +700,13 @@ static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
* This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
priv->last_phy_res_valid = true;
- memcpy(&priv->last_phy_res, pkt->u.raw,
+ memcpy(&priv->last_phy_res, pkt->data,
sizeof(struct iwl_rx_phy_res));
return 0;
}
@@ -757,12 +767,14 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u16 len,
u32 ampdu_status,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct ieee80211_rx_status *stats)
{
struct sk_buff *skb;
__le16 fc = hdr->frame_control;
struct iwl_rxon_context *ctx;
+ struct page *p;
+ int offset;
/* We only process data packets if the interface is open */
if (unlikely(!priv->is_open)) {
@@ -782,7 +794,9 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
return;
}
- skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+ offset = (void *)hdr - rxb_addr(rxb);
+ p = rxb_steal_page(rxb);
+ skb_add_rx_frag(skb, 0, p, offset, len);
iwl_update_stats(priv, false, fc, len);
@@ -793,23 +807,18 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
* sometimes even after already having transmitted frames for the
* association because the new RXON may reset the information.
*/
- if (unlikely(ieee80211_is_beacon(fc))) {
+ if (unlikely(ieee80211_is_beacon(fc) && priv->passive_no_rx)) {
for_each_context(priv, ctx) {
- if (!ctx->last_tx_rejected)
- continue;
if (compare_ether_addr(hdr->addr3,
ctx->active.bssid_addr))
continue;
- ctx->last_tx_rejected = false;
- iwl_trans_wake_any_queue(trans(priv), ctx->ctxid,
- "channel got active");
+ iwlagn_lift_passive_no_rx(priv);
}
}
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(priv->hw, skb);
- rxb->page = NULL;
}
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
@@ -915,7 +924,7 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv,
/* Called for REPLY_RX (legacy ABG frames), or
* REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct ieee80211_hdr *header;
@@ -938,12 +947,12 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
* received.
*/
if (pkt->hdr.cmd == REPLY_RX) {
- phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+ phy_res = (struct iwl_rx_phy_res *)pkt->data;
+ header = (struct ieee80211_hdr *)(pkt->data + sizeof(*phy_res)
+ phy_res->cfg_phy_cnt);
len = le16_to_cpu(phy_res->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
+ rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*phy_res) +
phy_res->cfg_phy_cnt + len);
ampdu_status = le32_to_cpu(rx_pkt_status);
} else {
@@ -952,10 +961,10 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
return 0;
}
phy_res = &priv->last_phy_res;
- amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+ amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
+ header = (struct ieee80211_hdr *)(pkt->data + sizeof(*amsdu));
len = le16_to_cpu(amsdu->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
+ rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*amsdu) + len);
ampdu_status = iwlagn_translate_rx_status(priv,
le32_to_cpu(rx_pkt_status));
}
@@ -1035,12 +1044,12 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
}
static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_wipan_noa_data *new_data, *old_data;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->u.raw;
+ struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->data;
/* no condition -- we're in softirq */
old_data = rcu_dereference_protected(priv->noa_data, true);
@@ -1086,7 +1095,7 @@ static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
*/
void iwl_setup_rx_handlers(struct iwl_priv *priv)
{
- int (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+ int (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
handlers = priv->rx_handlers;
@@ -1131,20 +1140,20 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
/* set up notification wait support */
- spin_lock_init(&priv->shrd->notif_wait_lock);
- INIT_LIST_HEAD(&priv->shrd->notif_waits);
- init_waitqueue_head(&priv->shrd->notif_waitq);
+ iwl_notification_wait_init(&priv->notif_wait);
/* Set up BT Rx handlers */
- if (cfg(priv)->lib->bt_rx_handler_setup)
- cfg(priv)->lib->bt_rx_handler_setup(priv);
-
+ if (cfg(priv)->bt_params)
+ iwlagn_bt_rx_handler_setup(priv);
}
-int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
- struct iwl_device_cmd *cmd)
+int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+ void (*pre_rx_handler)(struct iwl_priv *,
+ struct iwl_rx_cmd_buffer *);
int err = 0;
/*
@@ -1152,40 +1161,34 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
* even if the RX handler consumes the RXB we have
* access to it in the notification wait entry.
*/
- if (!list_empty(&priv->shrd->notif_waits)) {
- struct iwl_notification_wait *w;
-
- spin_lock(&priv->shrd->notif_wait_lock);
- list_for_each_entry(w, &priv->shrd->notif_waits, list) {
- if (w->cmd != pkt->hdr.cmd)
- continue;
+ iwl_notification_wait_notify(&priv->notif_wait, pkt);
+
+ /* RX data may be forwarded to userspace (using pre_rx_handler) in one
+ * of two cases: the first, that the user owns the uCode through
+ * testmode - in such case the pre_rx_handler is set and no further
+ * processing takes place. The other case is when the user wants to
+ * monitor the rx w/o affecting the regular flow - the pre_rx_handler
+ * will be set but the ownership flag != IWL_OWNERSHIP_TM and the flow
+ * continues.
+ * We need to use ACCESS_ONCE to prevent a case where the handler
+ * changes between the check and the call.
+ */
+ pre_rx_handler = ACCESS_ONCE(priv->pre_rx_handler);
+ if (pre_rx_handler)
+ pre_rx_handler(priv, rxb);
+ if (priv->ucode_owner != IWL_OWNERSHIP_TM) {
+ /* Based on type of command response or notification,
+ * handle those that need handling via function in
+ * rx_handlers table. See iwl_setup_rx_handlers() */
+ if (priv->rx_handlers[pkt->hdr.cmd]) {
+ priv->rx_handlers_stats[pkt->hdr.cmd]++;
+ err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
+ } else {
+ /* No handling needed */
IWL_DEBUG_RX(priv,
- "Notif: %s, 0x%02x - wake the callers up\n",
- get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- w->triggered = true;
- if (w->fn)
- w->fn(trans(priv), pkt, w->fn_data);
+ "No handler needed for %s, 0x%02x\n",
+ get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
}
- spin_unlock(&priv->shrd->notif_wait_lock);
-
- wake_up_all(&priv->shrd->notif_waitq);
- }
-
- if (priv->pre_rx_handler)
- priv->pre_rx_handler(priv, rxb);
-
- /* Based on type of command response or notification,
- * handle those that need handling via function in
- * rx_handlers table. See iwl_setup_rx_handlers() */
- if (priv->rx_handlers[pkt->hdr.cmd]) {
- priv->rx_handlers_stats[pkt->hdr.cmd]++;
- err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
- } else {
- /* No handling needed */
- IWL_DEBUG_RX(priv,
- "No handler needed for %s, 0x%02x\n",
- get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
}
return err;
}
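
The rewritten iwl_rx_dispatch() above boils down to a table lookup: each received packet's command id indexes priv->rx_handlers, a per-command counter is bumped, and ids without a handler only emit a debug line; the notification-wait machinery and the optional testmode pre_rx_handler run before that. A stand-alone sketch of the dispatch-table part follows (the command id, handler and sizes below are made up).

/* Sketch only: index a handler table by command id, count hits,
 * and fall back to a message when nothing is registered. */
#include <stdio.h>

struct rx_packet {
	unsigned char cmd;
	unsigned char data[32];
};

typedef int (*rx_handler_t)(const struct rx_packet *pkt);

static rx_handler_t rx_handlers[256];
static unsigned long rx_handlers_stats[256];

static int handle_statistics(const struct rx_packet *pkt)
{
	printf("statistics notification, first byte %d\n", pkt->data[0]);
	return 0;
}

static int rx_dispatch(const struct rx_packet *pkt)
{
	rx_handler_t fn = rx_handlers[pkt->cmd];

	if (!fn) {
		printf("no handler needed for 0x%02x\n", pkt->cmd);
		return 0;
	}
	rx_handlers_stats[pkt->cmd]++;
	return fn(pkt);
}

int main(void)
{
	struct rx_packet pkt = { .cmd = 0x9d, .data = { 7 } };

	rx_handlers[pkt.cmd] = handle_statistics;	/* made-up id */
	return rx_dispatch(&pkt);
}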
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 1c6659416621..2e1a31797a9e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -39,7 +39,7 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
int ret;
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
CMD_SYNC, sizeof(*send), send);
send->filter_flags = old_filter;
@@ -60,13 +60,13 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
u8 old_dev_type = send->dev_type;
int ret;
- iwl_init_notification_wait(priv->shrd, &disable_wait,
- REPLY_WIPAN_DEACTIVATION_COMPLETE,
- NULL, NULL);
+ iwl_init_notification_wait(&priv->notif_wait, &disable_wait,
+ REPLY_WIPAN_DEACTIVATION_COMPLETE,
+ NULL, NULL);
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
send->dev_type = RXON_DEV_TYPE_P2P;
- ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
CMD_SYNC, sizeof(*send), send);
send->filter_flags = old_filter;
@@ -74,9 +74,10 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
if (ret) {
IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
- iwl_remove_notification(priv->shrd, &disable_wait);
+ iwl_remove_notification(&priv->notif_wait, &disable_wait);
} else {
- ret = iwl_wait_notification(priv->shrd, &disable_wait, HZ);
+ ret = iwl_wait_notification(&priv->notif_wait,
+ &disable_wait, HZ);
if (ret)
IWL_ERR(priv, "Timed out waiting for PAN disable\n");
}
@@ -92,7 +93,7 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
int ret;
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC,
sizeof(*send), send);
send->filter_flags = old_filter;
@@ -121,7 +122,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
ctx->qos_data.qos_active,
ctx->qos_data.def_qos_parm.qos_flags);
- ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->qos_cmd, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, CMD_SYNC,
sizeof(struct iwl_qosparam_cmd),
&ctx->qos_data.def_qos_parm);
if (ret)
@@ -131,7 +132,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
static int iwlagn_update_beacon(struct iwl_priv *priv,
struct ieee80211_vif *vif)
{
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
dev_kfree_skb(priv->beacon_skb);
priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
@@ -180,7 +181,7 @@ static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
ctx->staging.ofdm_ht_triple_stream_basic_rates;
rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
- ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_assoc_cmd,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_assoc_cmd,
CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
return ret;
}
@@ -266,7 +267,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
* Associated RXON doesn't clear the station table in uCode,
* so we don't need to restore stations etc. after this.
*/
- ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC,
sizeof(struct iwl_rxon_cmd), &ctx->staging);
if (ret) {
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
@@ -274,8 +275,6 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
}
memcpy(active, &ctx->staging, sizeof(*active));
- iwl_reprogram_ap_sta(priv, ctx);
-
/* IBSS beacon needs to be sent after setting assoc */
if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
if (iwlagn_update_beacon(priv, ctx->vif))
@@ -315,7 +314,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
@@ -362,7 +361,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
slot0 = bcnint / 2;
slot1 = bcnint - slot0;
- if (test_bit(STATUS_SCAN_HW, &priv->shrd->status) ||
+ if (test_bit(STATUS_SCAN_HW, &priv->status) ||
(!ctx_bss->vif->bss_conf.idle &&
!ctx_bss->vif->bss_conf.assoc)) {
slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
@@ -378,7 +377,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
ctx_pan->beacon_int;
slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
- if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
+ if (test_bit(STATUS_SCAN_HW, &priv->status)) {
slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
slot1 = IWL_MIN_SLOT_TIME;
}
@@ -387,7 +386,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
cmd.slots[0].width = cpu_to_le16(slot0);
cmd.slots[1].width = cpu_to_le16(slot1);
- ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WIPAN_PARAMS, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, CMD_SYNC,
sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
@@ -420,12 +419,9 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
- return -EINVAL;
+ lockdep_assert_held(&priv->mutex);
- if (!iwl_is_alive(priv->shrd))
+ if (!iwl_is_alive(priv))
return -EBUSY;
/* This function hardcodes a bunch of dual-mode assumptions */
@@ -434,10 +430,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
if (!ctx->is_active)
return 0;
- /* override BSSID if necessary due to preauth */
- if (ctx->preauth_bssid)
- memcpy(ctx->staging.bssid_addr, ctx->bssid, ETH_ALEN);
-
/* always get timestamp with Rx frame */
ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
@@ -445,8 +437,7 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* force CTS-to-self frames protection if RTS-CTS is not preferred
* one aggregation protection method
*/
- if (!(cfg(priv)->ht_params &&
- cfg(priv)->ht_params->use_rts_for_aggregation))
+ if (!hw_params(priv).use_rts_for_aggregation)
ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
@@ -466,7 +457,7 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* receive commit_rxon request
* abort any previous channel switch if still in process
*/
- if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status) &&
+ if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
(priv->switch_channel != ctx->staging.channel)) {
IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
le16_to_cpu(priv->switch_channel));
@@ -549,7 +540,7 @@ void iwlagn_config_ht40(struct ieee80211_conf *conf,
int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx;
struct ieee80211_conf *conf = &hw->conf;
struct ieee80211_channel *channel = conf->channel;
@@ -558,17 +549,14 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
IWL_DEBUG_MAC80211(priv, "enter: changed %#x", changed);
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
- goto out;
-
- if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
+ if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
goto out;
}
- if (!iwl_is_ready(priv->shrd)) {
+ if (!iwl_is_ready(priv)) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
goto out;
}
@@ -590,8 +578,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- unsigned long flags;
-
ch_info = iwl_get_channel_info(priv, channel->band,
channel->hw_value);
if (!is_channel_valid(ch_info)) {
@@ -600,8 +586,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
goto out;
}
- spin_lock_irqsave(&priv->shrd->lock, flags);
-
for_each_context(priv, ctx) {
/* Configure HT40 channels */
if (ctx->ht.enabled != conf_is_ht(conf))
@@ -636,8 +620,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
ctx->vif);
}
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
-
iwl_update_bcast_stations(priv);
/*
@@ -668,7 +650,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
iwlagn_commit_rxon(priv, ctx);
}
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
@@ -685,7 +667,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
struct ieee80211_sta_ht_cap *ht_cap;
bool need_multiple;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
@@ -789,7 +771,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr,
priv->phy_calib_chain_noise_reset_cmd);
- ret = iwl_trans_send_cmd_pdu(trans(priv),
+ ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_PHY_CALIBRATION_CMD,
CMD_SYNC, sizeof(cmd), &cmd);
if (ret)
@@ -805,22 +787,22 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *bss_conf,
u32 changes)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
int ret;
bool force = false;
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
- if (unlikely(!iwl_is_ready(priv->shrd))) {
+ if (unlikely(!iwl_is_ready(priv))) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
return;
}
if (unlikely(!ctx->vif)) {
IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
return;
}
@@ -840,7 +822,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
if (changes & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
- priv->timestamp = bss_conf->timestamp;
+ priv->timestamp = bss_conf->last_tsf;
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
} else {
/*
@@ -851,12 +833,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
* not get stuck in this case either since it
* can happen if userspace gets confused.
*/
- if (ctx->last_tx_rejected) {
- ctx->last_tx_rejected = false;
- iwl_trans_wake_any_queue(trans(priv),
- ctx->ctxid,
- "Disassoc: flush queue");
- }
+ iwlagn_lift_passive_no_rx(priv);
+
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
if (ctx->ctxid == IWL_RXON_CTX_BSS)
@@ -900,6 +878,22 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ /*
+ * If the ucode decides to do beacon filtering before
+ * association, it will lose beacons that are needed
+ * before sending frames out on passive channels. This
+ * causes association failures on those channels. Enable
+ * receiving beacons in such cases.
+ */
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ if (!bss_conf->assoc)
+ ctx->staging.filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
+ else
+ ctx->staging.filter_flags &=
+ ~RXON_FILTER_BCON_AWARE_MSK;
+ }
+
if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
iwlagn_commit_rxon(priv, ctx);
@@ -916,7 +910,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
if (!priv->disable_chain_noise_cal)
iwlagn_chain_noise_reset(priv);
priv->start_calib = 1;
- WARN_ON(ctx->preauth_bssid);
}
if (changes & BSS_CHANGED_IBSS) {
@@ -934,7 +927,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
IWL_ERR(priv, "Error sending IBSS beacon\n");
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
void iwlagn_post_scan(struct iwl_priv *priv)
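
iwlagn_disable_pan() above now uses the relocated notification-wait helpers: register interest in REPLY_WIPAN_DEACTIVATION_COMPLETE, send the RXON command, then either wait (bounded by HZ) for the notification or drop the registration if the send failed. The sketch below models that ordering with a pthread condition variable; it is a user-space illustration only, not driver code (build with -pthread), and every name in it is invented.

/* Sketch only: wait, with a timeout, for a "deactivation complete"
 * notification delivered from another thread. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool triggered;

static void notification_arrived(void)		/* called from the RX path */
{
	pthread_mutex_lock(&lock);
	triggered = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static int wait_notification(int timeout_sec)
{
	struct timespec ts;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_sec;

	pthread_mutex_lock(&lock);
	while (!triggered && ret == 0)
		ret = pthread_cond_timedwait(&cond, &lock, &ts);
	pthread_mutex_unlock(&lock);

	return ret == ETIMEDOUT ? -ETIMEDOUT : 0;
}

static void *fake_firmware(void *arg)
{
	(void)arg;
	notification_arrived();		/* pretend the reply shows up */
	return NULL;
}

int main(void)
{
	pthread_t fw;

	/* "register" interest (triggered starts false), then "send" */
	pthread_create(&fw, NULL, fake_firmware, NULL);

	if (wait_notification(1))
		fprintf(stderr, "timed out waiting for deactivation\n");
	else
		printf("deactivation complete\n");

	pthread_join(fw, NULL);
	return 0;
}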
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index e483cfa8d14e..c4175603864b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -26,7 +26,7 @@
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
-
+#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-dev.h"
@@ -34,10 +34,14 @@
#include "iwl-agn.h"
#include "iwl-trans.h"
-/* priv->shrd->sta_lock must be held */
-static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
+static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
{
+ lockdep_assert_held(&priv->sta_lock);
+ if (sta_id >= IWLAGN_STATION_COUNT) {
+ IWL_ERR(priv, "invalid sta_id %u", sta_id);
+ return -EINVAL;
+ }
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u "
"addr %pM\n",
@@ -53,14 +57,15 @@ static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
sta_id, priv->stations[sta_id].sta.sta.addr);
}
+ return 0;
}
static int iwl_process_add_sta_resp(struct iwl_priv *priv,
struct iwl_addsta_cmd *addsta,
struct iwl_rx_packet *pkt)
{
+ struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data;
u8 sta_id = addsta->sta.sta_id;
- unsigned long flags;
int ret = -EIO;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
@@ -72,13 +77,12 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
sta_id);
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock(&priv->sta_lock);
- switch (pkt->u.add_sta.status) {
+ switch (add_sta_resp->status) {
case ADD_STA_SUCCESS_MSK:
IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
- iwl_sta_ucode_activate(priv, sta_id);
- ret = 0;
+ ret = iwl_sta_ucode_activate(priv, sta_id);
break;
case ADD_STA_NO_ROOM_IN_TABLE:
IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
@@ -94,7 +98,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
break;
default:
IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
- pkt->u.add_sta.status);
+ add_sta_resp->status);
break;
}
@@ -115,12 +119,12 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
priv->stations[sta_id].sta.mode ==
STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
addsta->sta.addr);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock(&priv->sta_lock);
return ret;
}
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -150,14 +154,14 @@ int iwl_send_add_sta(struct iwl_priv *priv,
might_sleep();
}
- ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ ret = iwl_dvm_send_cmd(priv, &cmd);
if (ret || (flags & CMD_ASYNC))
return ret;
/*else the command was successfully sent in SYNC mode, need to free
* the reply page */
- iwl_free_pages(priv->shrd, cmd.reply_page);
+ iwl_free_resp(&cmd);
if (cmd.handler_status)
IWL_ERR(priv, "%s - error in the CMD response %d", __func__,
@@ -166,34 +170,38 @@ int iwl_send_add_sta(struct iwl_priv *priv,
return cmd.handler_status;
}
-static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
- struct ieee80211_sta *sta,
- struct iwl_rxon_context *ctx)
+static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
+ struct ieee80211_sta *sta,
+ struct iwl_rxon_context *ctx,
+ __le32 *flags, __le32 *mask)
{
struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
- __le32 sta_flags;
u8 mimo_ps_mode;
+ *mask = STA_FLG_RTS_MIMO_PROT_MSK |
+ STA_FLG_MIMO_DIS_MSK |
+ STA_FLG_HT40_EN_MSK |
+ STA_FLG_MAX_AGG_SIZE_MSK |
+ STA_FLG_AGG_MPDU_DENSITY_MSK;
+ *flags = 0;
+
if (!sta || !sta_ht_inf->ht_supported)
- goto done;
+ return;
mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
- IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
+
+ IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
(mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
"static" :
(mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
"dynamic" : "disabled");
- sta_flags = priv->stations[index].sta.station_flags;
-
- sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
-
switch (mimo_ps_mode) {
case WLAN_HT_CAP_SM_PS_STATIC:
- sta_flags |= STA_FLG_MIMO_DIS_MSK;
+ *flags |= STA_FLG_MIMO_DIS_MSK;
break;
case WLAN_HT_CAP_SM_PS_DYNAMIC:
- sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+ *flags |= STA_FLG_RTS_MIMO_PROT_MSK;
break;
case WLAN_HT_CAP_SM_PS_DISABLED:
break;
@@ -202,20 +210,53 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
break;
}
- sta_flags |= cpu_to_le32(
- (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
+ *flags |= cpu_to_le32(
+ (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
- sta_flags |= cpu_to_le32(
- (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
+ *flags |= cpu_to_le32(
+ (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
- sta_flags |= STA_FLG_HT40_EN_MSK;
- else
- sta_flags &= ~STA_FLG_HT40_EN_MSK;
+ *flags |= STA_FLG_HT40_EN_MSK;
+}
+
+int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ struct ieee80211_sta *sta)
+{
+ u8 sta_id = iwl_sta_id(sta);
+ __le32 flags, mask;
+ struct iwl_addsta_cmd cmd;
+
+ if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
+
+ iwl_sta_calc_ht_flags(priv, sta, ctx, &flags, &mask);
+
+ spin_lock_bh(&priv->sta_lock);
+ priv->stations[sta_id].sta.station_flags &= ~mask;
+ priv->stations[sta_id].sta.station_flags |= flags;
+ spin_unlock_bh(&priv->sta_lock);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.mode = STA_CONTROL_MODIFY_MSK;
+ cmd.station_flags_msk = mask;
+ cmd.station_flags = flags;
+ cmd.sta.sta_id = sta_id;
+
+ return iwl_send_add_sta(priv, &cmd, CMD_SYNC);
+}
- priv->stations[index].sta.station_flags = sta_flags;
- done:
- return;
+static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
+ struct ieee80211_sta *sta,
+ struct iwl_rxon_context *ctx)
+{
+ __le32 flags, mask;
+
+ iwl_sta_calc_ht_flags(priv, sta, ctx, &flags, &mask);
+
+ lockdep_assert_held(&priv->sta_lock);
+ priv->stations[index].sta.station_flags &= ~mask;
+ priv->stations[index].sta.station_flags |= flags;
}
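The refactoring above separates policy from plumbing: iwl_sta_calc_ht_flags() only computes a (flags, mask) pair, iwl_set_ht_add_station() applies it to the shadow table under sta_lock, and the new iwl_sta_update_ht() additionally pushes a masked modify command to the firmware. The command construction in iwl_sta_update_ht(), written with designated initializers for clarity (illustrative sketch, equivalent to the memset()-and-assign sequence above, not part of the patch):

	struct iwl_addsta_cmd cmd = {
		.mode = STA_CONTROL_MODIFY_MSK,	/* modify an existing entry */
		.station_flags_msk = mask,	/* bits the firmware may change */
		.station_flags = flags,		/* their new values */
		.sta.sta_id = sta_id,
	};

	/* CMD_SYNC: sleeps until the firmware acknowledges the update */
	ret = iwl_send_add_sta(priv, &cmd, CMD_SYNC);

Carrying the mask separately is what lets unrelated station flags survive the modify untouched.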
/**
@@ -314,18 +355,17 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
const u8 *addr, bool is_ap,
struct ieee80211_sta *sta, u8 *sta_id_r)
{
- unsigned long flags_spin;
int ret = 0;
u8 sta_id;
struct iwl_addsta_cmd sta_cmd;
*sta_id_r = 0;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ spin_lock_bh(&priv->sta_lock);
sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
addr);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
return -EINVAL;
}
@@ -337,7 +377,7 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
IWL_DEBUG_INFO(priv, "STA %d already in process of being "
"added.\n", sta_id);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
return -EEXIST;
}
@@ -345,24 +385,24 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not "
"adding again.\n", sta_id, addr);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
return -EEXIST;
}
priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
memcpy(&sta_cmd, &priv->stations[sta_id].sta,
sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
/* Add station to device's station table */
ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
if (ret) {
- spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ spin_lock_bh(&priv->sta_lock);
IWL_ERR(priv, "Adding station %pM failed.\n",
priv->stations[sta_id].sta.sta.addr);
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
}
*sta_id_r = sta_id;
return ret;
@@ -370,11 +410,11 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
/**
* iwl_sta_ucode_deactivate - deactivate ucode status for a station
- *
- * priv->shrd->sta_lock must be held
*/
static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
{
+ lockdep_assert_held(&priv->sta_lock);
+
/* Ucode must be active and driver must be non active */
if ((priv->stations[sta_id].used &
(IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
@@ -393,8 +433,6 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
{
struct iwl_rx_packet *pkt;
int ret;
-
- unsigned long flags_spin;
struct iwl_rem_sta_cmd rm_sta_cmd;
struct iwl_host_cmd cmd = {
@@ -410,12 +448,12 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
cmd.flags |= CMD_WANT_SKB;
- ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ ret = iwl_dvm_send_cmd(priv, &cmd);
if (ret)
return ret;
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
+ pkt = cmd.resp_pkt;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
pkt->hdr.flags);
@@ -423,14 +461,13 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
}
if (!ret) {
- switch (pkt->u.rem_sta.status) {
+ struct iwl_rem_sta_resp *rem_sta_resp = (void *)pkt->data;
+ switch (rem_sta_resp->status) {
case REM_STA_SUCCESS_MSK:
if (!temporary) {
- spin_lock_irqsave(&priv->shrd->sta_lock,
- flags_spin);
+ spin_lock_bh(&priv->sta_lock);
iwl_sta_ucode_deactivate(priv, sta_id);
- spin_unlock_irqrestore(&priv->shrd->sta_lock,
- flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
}
IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
break;
@@ -440,7 +477,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
break;
}
}
- iwl_free_pages(priv->shrd, cmd.reply_page);
+ iwl_free_resp(&cmd);
return ret;
}
@@ -451,10 +488,9 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
const u8 *addr)
{
- unsigned long flags;
u8 tid;
- if (!iwl_is_ready(priv->shrd)) {
+ if (!iwl_is_ready(priv)) {
IWL_DEBUG_INFO(priv,
"Unable to remove station %pM, device not ready.\n",
addr);
@@ -472,7 +508,7 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
if (WARN_ON(sta_id == IWL_INVALID_STATION))
return -EINVAL;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
@@ -502,14 +538,49 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
if (WARN_ON(priv->num_stations < 0))
priv->num_stations = 0;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return iwl_send_remove_station(priv, addr, sta_id, false);
out_err:
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return -EINVAL;
}
+void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr)
+{
+ u8 tid;
+
+ if (!iwl_is_ready(priv)) {
+ IWL_DEBUG_INFO(priv,
+ "Unable to remove station %pM, device not ready.\n",
+ addr);
+ return;
+ }
+
+ IWL_DEBUG_ASSOC(priv, "Deactivating STA: %pM (%d)\n", addr, sta_id);
+
+ if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION))
+ return;
+
+ spin_lock_bh(&priv->sta_lock);
+
+ WARN_ON_ONCE(!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE));
+
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+ memset(&priv->tid_data[sta_id][tid], 0,
+ sizeof(priv->tid_data[sta_id][tid]));
+
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+
+ priv->num_stations--;
+
+ if (WARN_ON_ONCE(priv->num_stations < 0))
+ priv->num_stations = 0;
+
+ spin_unlock_bh(&priv->sta_lock);
+}
+
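The recurring change in this file is worth spelling out: the station table lock moves from the shared data (priv->shrd->sta_lock) to the per-device private structure, and the irqsave/irqrestore variants are downgraded to the BH-disabling ones, which presumes the lock is no longer taken from hard-IRQ context. The shape of the conversion (sketch, not an exact hunk):

	/* before: lock shared with hard-IRQ context, flags must be saved */
	unsigned long flags;

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
	/* ... touch priv->stations[] ... */
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);

	/* after: only process and softirq context take the lock */
	spin_lock_bh(&priv->sta_lock);
	/* ... touch priv->stations[] ... */
	spin_unlock_bh(&priv->sta_lock);

The lockdep annotations follow the same move, e.g. lockdep_assert_held(&priv->sta_lock) in the activate/deactivate helpers above.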
/**
* iwl_clear_ucode_stations - clear ucode station table bits
*
@@ -522,12 +593,11 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
int i;
- unsigned long flags_spin;
bool cleared = false;
IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
- spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ spin_lock_bh(&priv->sta_lock);
for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
if (ctx && ctx->ctxid != priv->stations[i].ctxid)
continue;
@@ -539,7 +609,7 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
cleared = true;
}
}
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
if (!cleared)
IWL_DEBUG_INFO(priv,
@@ -558,20 +628,19 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
struct iwl_addsta_cmd sta_cmd;
struct iwl_link_quality_cmd lq;
- unsigned long flags_spin;
int i;
bool found = false;
int ret;
bool send_lq;
- if (!iwl_is_ready(priv->shrd)) {
+ if (!iwl_is_ready(priv)) {
IWL_DEBUG_INFO(priv,
"Not ready yet, not restoring any stations.\n");
return;
}
IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
- spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ spin_lock_bh(&priv->sta_lock);
for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
if (ctx->ctxid != priv->stations[i].ctxid)
continue;
@@ -591,27 +660,24 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
sizeof(struct iwl_addsta_cmd));
send_lq = false;
if (priv->stations[i].lq) {
- if (priv->shrd->wowlan)
+ if (priv->wowlan)
iwl_sta_fill_lq(priv, ctx, i, &lq);
else
memcpy(&lq, priv->stations[i].lq,
sizeof(struct iwl_link_quality_cmd));
send_lq = true;
}
- spin_unlock_irqrestore(&priv->shrd->sta_lock,
- flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
if (ret) {
- spin_lock_irqsave(&priv->shrd->sta_lock,
- flags_spin);
+ spin_lock_bh(&priv->sta_lock);
IWL_ERR(priv, "Adding station %pM failed.\n",
priv->stations[i].sta.sta.addr);
priv->stations[i].used &=
~IWL_STA_DRIVER_ACTIVE;
priv->stations[i].used &=
~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->shrd->sta_lock,
- flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
}
/*
* Rate scaling has already been initialized, send
@@ -620,12 +686,12 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
if (send_lq)
iwl_send_lq_cmd(priv, ctx, &lq,
CMD_SYNC, true);
- spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ spin_lock_bh(&priv->sta_lock);
priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
}
}
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
if (!found)
IWL_DEBUG_INFO(priv, "Restoring all known stations .... "
"no stations to be restored.\n");
@@ -634,52 +700,6 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
"complete.\n");
}
-void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- unsigned long flags;
- int sta_id = ctx->ap_sta_id;
- int ret;
- struct iwl_addsta_cmd sta_cmd;
- struct iwl_link_quality_cmd lq;
- bool active, have_lq = false;
-
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
- return;
- }
-
- memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
- sta_cmd.mode = 0;
- if (priv->stations[sta_id].lq) {
- memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
- have_lq = true;
- }
-
- active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
- priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
-
- if (active) {
- ret = iwl_send_remove_station(
- priv, priv->stations[sta_id].sta.sta.addr,
- sta_id, true);
- if (ret)
- IWL_ERR(priv, "failed to remove STA %pM (%d)\n",
- priv->stations[sta_id].sta.sta.addr, ret);
- }
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
-
- ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
- if (ret)
- IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
- priv->stations[sta_id].sta.sta.addr, ret);
- if (have_lq)
- iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
-}
-
int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
{
int i;
@@ -693,10 +713,9 @@ int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
{
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
if (!(priv->stations[i].used & IWL_STA_BCAST))
continue;
@@ -708,7 +727,7 @@ void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
kfree(priv->stations[i].lq);
priv->stations[i].lq = NULL;
}
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
}
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -780,8 +799,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct iwl_link_quality_cmd *lq, u8 flags, bool init)
{
int ret = 0;
- unsigned long flags_spin;
-
struct iwl_host_cmd cmd = {
.id = REPLY_TX_LINK_QUALITY_CMD,
.len = { sizeof(struct iwl_link_quality_cmd), },
@@ -793,19 +810,19 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
return -EINVAL;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ spin_lock_bh(&priv->sta_lock);
if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
return -EINVAL;
}
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
iwl_dump_lq_cmd(priv, lq);
if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
return -EINVAL;
if (is_lq_table_valid(priv, ctx, lq))
- ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ ret = iwl_dvm_send_cmd(priv, &cmd);
else
ret = -EINVAL;
@@ -816,9 +833,9 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
IWL_DEBUG_INFO(priv, "init LQ command complete, "
"clearing sta addition status for sta %d\n",
lq->sta_id);
- spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
+ spin_lock_bh(&priv->sta_lock);
priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
+ spin_unlock_bh(&priv->sta_lock);
}
return ret;
}
@@ -831,7 +848,7 @@ void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
u32 rate_flags = 0;
__le32 rate_n_flags;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
memset(link_cmd, 0, sizeof(*link_cmd));
@@ -903,7 +920,6 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv,
int ret;
u8 sta_id;
struct iwl_link_quality_cmd *link_cmd;
- unsigned long flags;
if (sta_id_r)
*sta_id_r = IWL_INVALID_STATION;
@@ -917,9 +933,9 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv,
if (sta_id_r)
*sta_id_r = sta_id;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
priv->stations[sta_id].used |= IWL_STA_LOCAL;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
/* Set up default rate scaling table in device's station table */
link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
@@ -934,9 +950,9 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv,
if (ret)
IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -991,7 +1007,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
cmd.len[0] = cmd_size;
if (not_empty || send_if_empty)
- return iwl_trans_send_cmd(trans(priv), &cmd);
+ return iwl_dvm_send_cmd(priv, &cmd);
else
return 0;
}
@@ -999,7 +1015,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
int iwl_restore_default_wep_keys(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
return iwl_send_static_wepkey_cmd(priv, ctx, false);
}
@@ -1010,13 +1026,13 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
{
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
keyconf->keyidx);
memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
- if (iwl_is_rfkill(priv->shrd)) {
+ if (iwl_is_rfkill(priv)) {
IWL_DEBUG_WEP(priv,
"Not sending REPLY_WEPKEY command due to RFKILL.\n");
/* but keys in device are clear anyway so return success */
@@ -1035,7 +1051,7 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
{
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
if (keyconf->keylen != WEP_KEY_LEN_128 &&
keyconf->keylen != WEP_KEY_LEN_64) {
@@ -1077,32 +1093,19 @@ static u8 iwlagn_key_sta_id(struct iwl_priv *priv,
struct ieee80211_sta *sta)
{
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- u8 sta_id = IWL_INVALID_STATION;
if (sta)
- sta_id = iwl_sta_id(sta);
+ return iwl_sta_id(sta);
/*
* The device expects GTKs for station interfaces to be
* installed as GTKs for the AP station. If we have no
* station ID, then use the ap_sta_id in that case.
*/
- if (!sta && vif && vif_priv->ctx) {
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- sta_id = vif_priv->ctx->ap_sta_id;
- break;
- default:
- /*
- * In all other cases, the key will be
- * used either for TX only or is bound
- * to a station already.
- */
- break;
- }
- }
+ if (vif->type == NL80211_IFTYPE_STATION && vif_priv->ctx)
+ return vif_priv->ctx->ap_sta_id;
- return sta_id;
+ return IWL_INVALID_STATION;
}
static int iwlagn_send_sta_key(struct iwl_priv *priv,
@@ -1110,14 +1113,13 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
u32 cmd_flags)
{
- unsigned long flags;
__le16 key_flags;
struct iwl_addsta_cmd sta_cmd;
int i;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
key_flags |= STA_KEY_FLG_MAP_KEY_MSK;
@@ -1184,7 +1186,6 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf,
struct ieee80211_sta *sta)
{
- unsigned long flags;
struct iwl_addsta_cmd sta_cmd;
u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
__le16 key_flags;
@@ -1193,16 +1194,16 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
if (sta_id == IWL_INVALID_STATION)
return -ENOENT;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE))
sta_id = IWL_INVALID_STATION;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
if (sta_id == IWL_INVALID_STATION)
return 0;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
ctx->key_mapping_keys--;
@@ -1242,7 +1243,7 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
if (sta_id == IWL_INVALID_STATION)
return -EINVAL;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv);
if (keyconf->hw_key_idx == WEP_INVALID_OFFSET)
@@ -1297,21 +1298,20 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
struct iwl_link_quality_cmd *link_cmd;
- unsigned long flags;
u8 sta_id;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Unable to prepare broadcast station\n");
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return -EINVAL;
}
priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
priv->stations[sta_id].used |= IWL_STA_BCAST;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
if (!link_cmd) {
@@ -1320,9 +1320,9 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
return -ENOMEM;
}
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -1336,7 +1336,6 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
int iwl_update_bcast_station(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
- unsigned long flags;
struct iwl_link_quality_cmd *link_cmd;
u8 sta_id = ctx->bcast_sta_id;
@@ -1346,13 +1345,13 @@ int iwl_update_bcast_station(struct iwl_priv *priv,
return -ENOMEM;
}
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
if (priv->stations[sta_id].lq)
kfree(priv->stations[sta_id].lq);
else
IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -1376,18 +1375,17 @@ int iwl_update_bcast_stations(struct iwl_priv *priv)
*/
int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
{
- unsigned long flags;
struct iwl_addsta_cmd sta_cmd;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
/* Remove "disable" flag, to enable Tx for this TID */
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
@@ -1395,24 +1393,23 @@ int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
int tid, u16 ssn)
{
- unsigned long flags;
int sta_id;
struct iwl_addsta_cmd sta_cmd;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION)
return -ENXIO;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
priv->stations[sta_id].sta.station_flags_msk = 0;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
@@ -1420,11 +1417,10 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
int tid)
{
- unsigned long flags;
int sta_id;
struct iwl_addsta_cmd sta_cmd;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION) {
@@ -1432,13 +1428,13 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
return -ENXIO;
}
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
priv->stations[sta_id].sta.station_flags_msk = 0;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
@@ -1447,16 +1443,14 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.sta.modify_mask =
- STA_MODIFY_SLEEP_TX_COUNT_MSK;
- priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ struct iwl_addsta_cmd cmd = {
+ .mode = STA_CONTROL_MODIFY_MSK,
+ .station_flags = STA_FLG_PWR_SAVE_MSK,
+ .station_flags_msk = STA_FLG_PWR_SAVE_MSK,
+ .sta.sta_id = sta_id,
+ .sta.modify_mask = STA_MODIFY_SLEEP_TX_COUNT_MSK,
+ .sleep_tx_count = cpu_to_le16(cnt),
+ };
+ iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
}
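One detail of the rewritten iwl_sta_modify_sleep_tx_count() above: it builds the command on the stack and fires it with CMD_ASYNC, so it no longer touches the station lock and can run from the TX hot path (see iwlagn_tx_skb() below, whose comment notes the ordering guarantee the async case relies on). The two call shapes used in this file are:

	/* synchronous: caller may sleep, return value carries the
	 * firmware's handler status */
	ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);

	/* asynchronous: fire-and-forget from atomic context; the
	 * REPLY_ADD_STA callback processes the response later */
	iwl_send_add_sta(priv, &cmd, CMD_ASYNC);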
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
index b0dff7a753a5..baaf5ba2fc38 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -34,6 +34,7 @@
#include <net/mac80211.h>
+#include "iwl-agn.h"
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
@@ -173,24 +174,24 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
unsigned long flags;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
if (tt->state == IWL_TI_CT_KILL) {
if (priv->thermal_throttle.ct_kill_toggle) {
- iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
+ iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
priv->thermal_throttle.ct_kill_toggle = false;
} else {
- iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
+ iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
priv->thermal_throttle.ct_kill_toggle = true;
}
- iwl_read32(bus(priv), CSR_UCODE_DRV_GP1);
- spin_lock_irqsave(&bus(priv)->reg_lock, flags);
- if (!iwl_grab_nic_access(bus(priv)))
- iwl_release_nic_access(bus(priv));
- spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
+ iwl_read32(trans(priv), CSR_UCODE_DRV_GP1);
+ spin_lock_irqsave(&trans(priv)->reg_lock, flags);
+ if (likely(iwl_grab_nic_access(trans(priv))))
+ iwl_release_nic_access(trans(priv));
+ spin_unlock_irqrestore(&trans(priv)->reg_lock, flags);
/* Reschedule the ct_kill timer to occur in
* CT_KILL_EXIT_DURATION seconds to ensure we get a
@@ -224,7 +225,7 @@ static void iwl_tt_ready_for_ct_kill(unsigned long data)
struct iwl_priv *priv = (struct iwl_priv *)data;
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
/* temperature timer expired, ready to go into CT_KILL state */
@@ -232,7 +233,7 @@ static void iwl_tt_ready_for_ct_kill(unsigned long data)
IWL_DEBUG_TEMP(priv, "entering CT_KILL state when "
"temperature timer expired\n");
tt->state = IWL_TI_CT_KILL;
- set_bit(STATUS_CT_KILL, &priv->shrd->status);
+ set_bit(STATUS_CT_KILL, &priv->status);
iwl_perform_ct_kill_task(priv, true);
}
}
@@ -310,24 +311,23 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
tt->tt_power_mode = IWL_POWER_INDEX_5;
break;
}
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if (old_state == IWL_TI_CT_KILL)
- clear_bit(STATUS_CT_KILL, &priv->shrd->status);
+ clear_bit(STATUS_CT_KILL, &priv->status);
if (tt->state != IWL_TI_CT_KILL &&
iwl_power_update_mode(priv, true)) {
/* TT state not updated
* try again during next temperature read
*/
if (old_state == IWL_TI_CT_KILL)
- set_bit(STATUS_CT_KILL, &priv->shrd->status);
+ set_bit(STATUS_CT_KILL, &priv->status);
tt->state = old_state;
IWL_ERR(priv, "Cannot update power mode, "
"TT state not updated\n");
} else {
if (tt->state == IWL_TI_CT_KILL) {
if (force) {
- set_bit(STATUS_CT_KILL,
- &priv->shrd->status);
+ set_bit(STATUS_CT_KILL, &priv->status);
iwl_perform_ct_kill_task(priv, true);
} else {
iwl_prepare_ct_kill_task(priv);
@@ -341,7 +341,7 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
IWL_DEBUG_TEMP(priv, "Power Index change to %u\n",
tt->tt_power_mode);
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
}
@@ -451,9 +451,9 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
* in case get disabled before */
iwl_set_rxon_ht(priv, &priv->current_ht_config);
}
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if (old_state == IWL_TI_CT_KILL)
- clear_bit(STATUS_CT_KILL, &priv->shrd->status);
+ clear_bit(STATUS_CT_KILL, &priv->status);
if (tt->state != IWL_TI_CT_KILL &&
iwl_power_update_mode(priv, true)) {
/* TT state not updated
@@ -462,7 +462,7 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
IWL_ERR(priv, "Cannot update power mode, "
"TT state not updated\n");
if (old_state == IWL_TI_CT_KILL)
- set_bit(STATUS_CT_KILL, &priv->shrd->status);
+ set_bit(STATUS_CT_KILL, &priv->status);
tt->state = old_state;
} else {
IWL_DEBUG_TEMP(priv,
@@ -473,8 +473,7 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
if (force) {
IWL_DEBUG_TEMP(priv,
"Enter IWL_TI_CT_KILL\n");
- set_bit(STATUS_CT_KILL,
- &priv->shrd->status);
+ set_bit(STATUS_CT_KILL, &priv->status);
iwl_perform_ct_kill_task(priv, true);
} else {
iwl_prepare_ct_kill_task(priv);
@@ -486,7 +485,7 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
iwl_perform_ct_kill_task(priv, false);
}
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
}
@@ -505,10 +504,10 @@ static void iwl_bg_ct_enter(struct work_struct *work)
struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (!iwl_is_ready(priv->shrd))
+ if (!iwl_is_ready(priv))
return;
if (tt->state != IWL_TI_CT_KILL) {
@@ -534,10 +533,10 @@ static void iwl_bg_ct_exit(struct work_struct *work)
struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (!iwl_is_ready(priv->shrd))
+ if (!iwl_is_ready(priv))
return;
/* stop ct_kill_exit_tm timer */
@@ -564,20 +563,20 @@ static void iwl_bg_ct_exit(struct work_struct *work)
void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
IWL_DEBUG_TEMP(priv, "Queueing critical temperature enter.\n");
- queue_work(priv->shrd->workqueue, &priv->ct_enter);
+ queue_work(priv->workqueue, &priv->ct_enter);
}
void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
IWL_DEBUG_TEMP(priv, "Queueing critical temperature exit.\n");
- queue_work(priv->shrd->workqueue, &priv->ct_exit);
+ queue_work(priv->workqueue, &priv->ct_exit);
}
static void iwl_bg_tt_work(struct work_struct *work)
@@ -585,7 +584,7 @@ static void iwl_bg_tt_work(struct work_struct *work)
struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
s32 temp = priv->temperature; /* degrees CELSIUS except specified */
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
if (!priv->thermal_throttle.advanced_tt)
@@ -596,11 +595,11 @@ static void iwl_bg_tt_work(struct work_struct *work)
void iwl_tt_handler(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
IWL_DEBUG_TEMP(priv, "Queueing thermal throttling work.\n");
- queue_work(priv->shrd->workqueue, &priv->tt_work);
+ queue_work(priv->workqueue, &priv->tt_work);
}
/* Thermal throttling initialization
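Both thermal-throttle handlers above (legacy and advanced) share the same recovery shape: optimistically clear STATUS_CT_KILL, try to apply the new power mode, and roll everything back if the firmware update fails so the next temperature reading can retry. Condensed from the hunks above (illustrative):

	mutex_lock(&priv->mutex);

	if (old_state == IWL_TI_CT_KILL)
		clear_bit(STATUS_CT_KILL, &priv->status);

	if (tt->state != IWL_TI_CT_KILL && iwl_power_update_mode(priv, true)) {
		/* update failed: restore the bit and the old state,
		 * retry on the next temperature read */
		if (old_state == IWL_TI_CT_KILL)
			set_bit(STATUS_CT_KILL, &priv->status);
		tt->state = old_state;
	}

	mutex_unlock(&priv->mutex);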
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h b/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
index 7282a23e8f1c..86bbf47501c1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 63bbc60be28e..34adedc74d35 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -126,7 +126,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
u8 data_retry_limit;
u8 rate_plcp;
- if (priv->shrd->wowlan) {
+ if (priv->wowlan) {
rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
} else {
@@ -208,10 +208,9 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
}
static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
- struct sk_buff *skb_frag,
- int sta_id)
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd,
+ struct sk_buff *skb_frag)
{
struct ieee80211_key_conf *keyconf = info->control.hw_key;
@@ -249,6 +248,35 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
}
}
+/**
+ * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @context: the current context
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * inline wraps that pattern.
+ */
+static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
+ struct ieee80211_sta *sta)
+{
+ int sta_id;
+
+ if (!sta)
+ return context->bcast_sta_id;
+
+ sta_id = iwl_sta_id(sta);
+
+ /*
+ * mac80211 should not be passing a partially
+ * initialised station!
+ */
+ WARN_ON(sta_id == IWL_INVALID_STATION);
+
+ return sta_id;
+}
+
/*
* start REPLY_TX command process
*/
@@ -260,19 +288,16 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_device_cmd *dev_cmd = NULL;
struct iwl_tx_cmd *tx_cmd;
-
__le16 fc;
u8 hdr_len;
u16 len, seq_number = 0;
u8 sta_id, tid = IWL_MAX_TID_COUNT;
- unsigned long flags;
bool is_agg = false;
if (info->control.vif)
ctx = iwl_rxon_ctx_from_vif(info->control.vif);
- spin_lock_irqsave(&priv->shrd->lock, flags);
- if (iwl_is_rfkill(priv->shrd)) {
+ if (iwl_is_rfkill(priv)) {
IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
goto drop_unlock_priv;
}
@@ -308,7 +333,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
sta_id = ctx->bcast_sta_id;
else {
/* Find index into station table for destination station */
- sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
+ sta_id = iwl_sta_id_or_broadcast(ctx, info->control.sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
hdr->addr1);
@@ -322,7 +347,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
sta_priv = (void *)info->control.sta->drv_priv;
if (sta_priv && sta_priv->asleep &&
- (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
+ (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
/*
* This sends an asynchronous command to the device,
* but we can rely on it being processed before the
@@ -331,6 +356,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
* counter.
* For now set the counter to just 1 since we do not
* support uAPSD yet.
+ *
+ * FIXME: If we get two non-bufferable frames one
+ * after the other, we might only send out one of
+ * them because this is racy.
*/
iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
}
@@ -338,13 +367,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (info->flags & IEEE80211_TX_CTL_AMPDU)
is_agg = true;
- /* irqs already disabled/saved above when locking priv->shrd->lock */
- spin_lock(&priv->shrd->sta_lock);
-
- dev_cmd = kmem_cache_alloc(priv->tx_cmd_pool, GFP_ATOMIC);
+ dev_cmd = kmem_cache_alloc(iwl_tx_cmd_pool, GFP_ATOMIC);
if (unlikely(!dev_cmd))
- goto drop_unlock_sta;
+ goto drop_unlock_priv;
memset(dev_cmd, 0, sizeof(*dev_cmd));
tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
@@ -354,7 +380,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
tx_cmd->len = cpu_to_le16(len);
if (info->control.hw_key)
- iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+ iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb);
/* TODO need this for burst mode later on */
iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
@@ -369,6 +395,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
info->driver_data[0] = ctx;
info->driver_data[1] = dev_cmd;
+ spin_lock(&priv->sta_lock);
+
if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
u8 *qc = NULL;
struct iwl_tid_data *tid_data;
@@ -414,8 +442,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
!ieee80211_has_morefrags(fc))
priv->tid_data[sta_id][tid].seq_number = seq_number;
- spin_unlock(&priv->shrd->sta_lock);
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ spin_unlock(&priv->sta_lock);
/*
* Avoid atomic ops if it isn't an associated client.
@@ -431,10 +458,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
drop_unlock_sta:
if (dev_cmd)
- kmem_cache_free(priv->tx_cmd_pool, dev_cmd);
- spin_unlock(&priv->shrd->sta_lock);
+ kmem_cache_free(iwl_tx_cmd_pool, dev_cmd);
+ spin_unlock(&priv->sta_lock);
drop_unlock_priv:
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
return -1;
}
@@ -442,7 +468,6 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid)
{
struct iwl_tid_data *tid_data;
- unsigned long flags;
int sta_id;
sta_id = iwl_sta_id(sta);
@@ -452,7 +477,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
return -ENXIO;
}
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
tid_data = &priv->tid_data[sta_id][tid];
@@ -472,7 +497,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
IWL_WARN(priv, "Stopping AGG while state not ON "
"or starting for %d on %d (%d)\n", sta_id, tid,
priv->tid_data[sta_id][tid].agg.state);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -486,7 +511,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
tid_data->next_reclaimed);
priv->tid_data[sta_id][tid].agg.state =
IWL_EMPTYING_HW_QUEUE_DELBA;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -495,14 +520,10 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
turn_off:
priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
- /* do not restore/save irqs */
- spin_unlock(&priv->shrd->sta_lock);
- spin_lock(&priv->shrd->lock);
+ spin_unlock_bh(&priv->sta_lock);
iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
-
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
return 0;
@@ -512,7 +533,6 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
struct iwl_tid_data *tid_data;
- unsigned long flags;
int sta_id;
int ret;
@@ -536,7 +556,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
if (ret)
return ret;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
tid_data = &priv->tid_data[sta_id][tid];
tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
@@ -545,7 +565,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid);
if (ret) {
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return ret;
}
@@ -562,7 +582,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
}
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
return ret;
}
@@ -572,14 +592,13 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
{
struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- unsigned long flags;
u16 ssn;
buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock_bh(&priv->sta_lock);
ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock_bh(&priv->sta_lock);
iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid,
buf_size, ssn);
@@ -604,8 +623,7 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
sta_priv->max_agg_bufsize =
min(sta_priv->max_agg_bufsize, buf_size);
- if (cfg(priv)->ht_params &&
- cfg(priv)->ht_params->use_rts_for_aggregation) {
+ if (hw_params(priv).use_rts_for_aggregation) {
/*
* switch to RTS/CTS if it is the prefer protection
* method for HT traffic
@@ -635,7 +653,7 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
struct ieee80211_vif *vif;
u8 *addr;
- lockdep_assert_held(&priv->shrd->sta_lock);
+ lockdep_assert_held(&priv->sta_lock);
addr = priv->stations[sta_id].sta.sta.addr;
ctx = priv->stations[sta_id].ctxid;
@@ -982,19 +1000,19 @@ static void iwl_check_abort_status(struct iwl_priv *priv,
{
if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
IWL_ERR(priv, "Tx flush command to flush out all frames\n");
- if (!test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
- queue_work(priv->shrd->workqueue, &priv->tx_flush);
+ if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
+ queue_work(priv->workqueue, &priv->tx_flush);
}
}
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
int txq_id = SEQ_TO_QUEUE(sequence);
int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
- struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ struct iwlagn_tx_resp *tx_resp = (void *)pkt->data;
struct ieee80211_hdr *hdr;
u32 status = le16_to_cpu(tx_resp->status.status);
u16 ssn = iwlagn_get_scd_ssn(tx_resp);
@@ -1002,7 +1020,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
int sta_id;
int freed;
struct ieee80211_tx_info *info;
- unsigned long flags;
struct sk_buff_head skbs;
struct sk_buff *skb;
struct iwl_rxon_context *ctx;
@@ -1013,11 +1030,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
IWLAGN_TX_RES_RA_POS;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock(&priv->sta_lock);
if (is_agg)
iwl_rx_reply_tx_agg(priv, tx_resp);
+ __skb_queue_head_init(&skbs);
+
if (tx_resp->frame_count == 1) {
u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);
@@ -1037,8 +1056,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
next_reclaimed = ssn;
}
- __skb_queue_head_init(&skbs);
-
if (tid != IWL_TID_NON_QOS) {
priv->tid_data[sta_id][tid].next_reclaimed =
next_reclaimed;
@@ -1047,12 +1064,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
}
/*we can free until ssn % q.n_bd not inclusive */
- WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
- ssn, status, &skbs));
+ WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid,
+ txq_id, ssn, &skbs));
iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
- while (!skb_queue_empty(&skbs)) {
- skb = __skb_dequeue(&skbs);
+
+ /* process frames */
+ skb_queue_walk(&skbs, skb) {
hdr = (struct ieee80211_hdr *)skb->data;
if (!ieee80211_is_data_qos(hdr->frame_control))
@@ -1060,7 +1078,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
info = IEEE80211_SKB_CB(skb);
ctx = info->driver_data[0];
- kmem_cache_free(priv->tx_cmd_pool,
+ kmem_cache_free(iwl_tx_cmd_pool,
(info->driver_data[1]));
memset(&info->status, 0, sizeof(info->status));
@@ -1068,9 +1086,11 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
iwl_is_associated_ctx(ctx) && ctx->vif &&
ctx->vif->type == NL80211_IFTYPE_STATION) {
- ctx->last_tx_rejected = true;
- iwl_trans_stop_queue(trans(priv), txq_id,
- "Tx on passive channel");
+ /* block and stop all queues */
+ priv->passive_no_rx = true;
+ IWL_DEBUG_TX_QUEUES(priv, "stop all queues: "
+ "passive channel");
+ ieee80211_stop_queues(priv->hw);
IWL_DEBUG_TX_REPLY(priv,
"TXQ %d status %s (0x%08x) "
@@ -1094,8 +1114,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
if (!is_agg)
iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
- ieee80211_tx_status_irqsafe(priv->hw, skb);
-
freed++;
}
@@ -1103,7 +1121,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
}
iwl_check_abort_status(priv, tx_resp->frame_count, status);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock(&priv->sta_lock);
+
+ while (!skb_queue_empty(&skbs)) {
+ skb = __skb_dequeue(&skbs);
+ ieee80211_tx_status(priv->hw, skb);
+ }
+
return 0;
}
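The TX-response handler above now collects reclaimed frames into a local queue while holding sta_lock and only hands them to mac80211 after the lock is dropped, replacing the old ieee80211_tx_status_irqsafe() calls made inside the critical section. Reduced to its skeleton (illustrative; the reclaim call fills the queue as in the hunk above):

	struct sk_buff_head skbs;
	struct sk_buff *skb;

	__skb_queue_head_init(&skbs);

	spin_lock(&priv->sta_lock);
	/* ... iwl_trans_reclaim() unlinks finished frames into &skbs,
	 * per-frame driver bookkeeping happens here ... */
	spin_unlock(&priv->sta_lock);

	/* report status outside the lock; mac80211 may call back into
	 * the driver from this path */
	while (!skb_queue_empty(&skbs)) {
		skb = __skb_dequeue(&skbs);
		ieee80211_tx_status(priv->hw, skb);
	}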
@@ -1114,17 +1138,16 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
* of frames sent via aggregation.
*/
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+ struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
struct iwl_ht_agg *agg;
struct sk_buff_head reclaimed_skbs;
struct ieee80211_tx_info *info;
struct ieee80211_hdr *hdr;
struct sk_buff *skb;
- unsigned long flags;
int sta_id;
int tid;
int freed;
@@ -1136,7 +1159,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
* (in Tx queue's circular buffer) of first TFD/frame in window */
u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
- if (scd_flow >= hw_params(priv).max_txq_num) {
+ if (scd_flow >= cfg(priv)->base_params->num_of_queues) {
IWL_ERR(priv,
"BUG_ON scd_flow is bigger than number of queues\n");
return 0;
@@ -1146,12 +1169,12 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
tid = ba_resp->tid;
agg = &priv->tid_data[sta_id][tid].agg;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ spin_lock(&priv->sta_lock);
if (unlikely(!agg->wait_for_ba)) {
if (unlikely(ba_resp->bitmap))
IWL_ERR(priv, "Received BA when not expected\n");
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ spin_unlock(&priv->sta_lock);
return 0;
}
@@ -1161,8 +1184,8 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
* block-ack window (we assume that they've been successfully
* transmitted ... if not, it's too late anyway). */
if (iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow,
- ba_resp_scd_ssn, 0, &reclaimed_skbs)) {
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ ba_resp_scd_ssn, &reclaimed_skbs)) {
+ spin_unlock(&priv->sta_lock);
return 0;
}
@@ -1198,9 +1221,8 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
- while (!skb_queue_empty(&reclaimed_skbs)) {
- skb = __skb_dequeue(&reclaimed_skbs);
+ skb_queue_walk(&reclaimed_skbs, skb) {
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_data_qos(hdr->frame_control))
@@ -1209,7 +1231,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
WARN_ON_ONCE(1);
info = IEEE80211_SKB_CB(skb);
- kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1]));
+ kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));
if (freed == 1) {
/* this is the first skb we deliver in this batch */
@@ -1223,10 +1245,14 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
info);
}
+ }
+
+ spin_unlock(&priv->sta_lock);
- ieee80211_tx_status_irqsafe(priv->hw, skb);
+ while (!skb_queue_empty(&reclaimed_skbs)) {
+ skb = __skb_dequeue(&reclaimed_skbs);
+ ieee80211_tx_status(priv->hw, skb);
}
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
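The block-ack path follows the same idea, with one constraint worth noting: skb_queue_walk() iterates without unlinking, so entries must stay on the list for the whole walk (skb_queue_walk_safe() would be needed to unlink while iterating). Removal is therefore deferred to the flush loop that runs after the lock is released. Schematically:

	/* first pass: bookkeeping only, entries stay queued */
	skb_queue_walk(&reclaimed_skbs, skb) {
		/* inspect skb, update counters, fill tx_info */
	}

	spin_unlock(&priv->sta_lock);

	/* second pass: unlink and deliver */
	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		ieee80211_tx_status(priv->hw, skb);
	}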
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index b5c7c5f0a753..f1226dbf789d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -34,7 +34,6 @@
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
@@ -43,15 +42,14 @@
#include <asm/div64.h>
#include "iwl-eeprom.h"
-#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-agn-calib.h"
#include "iwl-agn.h"
#include "iwl-shared.h"
-#include "iwl-bus.h"
#include "iwl-trans.h"
+#include "iwl-op-mode.h"
/******************************************************************************
*
@@ -134,7 +132,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
* beacon contents.
*/
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
if (!priv->beacon_ctx) {
IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
@@ -199,7 +197,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
cmd.data[1] = priv->beacon_skb->data;
cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
- return iwl_trans_send_cmd(trans(priv), &cmd);
+ return iwl_dvm_send_cmd(priv, &cmd);
}
static void iwl_bg_beacon_update(struct work_struct *work)
@@ -208,7 +206,7 @@ static void iwl_bg_beacon_update(struct work_struct *work)
container_of(work, struct iwl_priv, beacon_update);
struct sk_buff *beacon;
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if (!priv->beacon_ctx) {
IWL_ERR(priv, "updating beacon w/o beacon context!\n");
goto out;
@@ -238,7 +236,7 @@ static void iwl_bg_beacon_update(struct work_struct *work)
iwlagn_send_beacon_cmd(priv);
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
static void iwl_bg_bt_runtime_config(struct work_struct *work)
@@ -246,11 +244,11 @@ static void iwl_bg_bt_runtime_config(struct work_struct *work)
struct iwl_priv *priv =
container_of(work, struct iwl_priv, bt_runtime_config);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
/* dont send host command if rf-kill is on */
- if (!iwl_is_ready_rf(priv->shrd))
+ if (!iwl_is_ready_rf(priv))
return;
iwlagn_send_advance_bt_config(priv);
}
@@ -261,13 +259,13 @@ static void iwl_bg_bt_full_concurrency(struct work_struct *work)
container_of(work, struct iwl_priv, bt_full_concurrency);
struct iwl_rxon_context *ctx;
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
goto out;
/* dont send host command if rf-kill is on */
- if (!iwl_is_ready_rf(priv->shrd))
+ if (!iwl_is_ready_rf(priv))
goto out;
IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
@@ -285,7 +283,7 @@ static void iwl_bg_bt_full_concurrency(struct work_struct *work)
iwlagn_send_advance_bt_config(priv);
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
/**
@@ -302,11 +300,11 @@ static void iwl_bg_statistics_periodic(unsigned long data)
{
struct iwl_priv *priv = (struct iwl_priv *)data;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
/* dont send host command if rf-kill is on */
- if (!iwl_is_ready_rf(priv->shrd))
+ if (!iwl_is_ready_rf(priv))
return;
iwl_send_statistics_request(priv, CMD_ASYNC, false);
@@ -315,7 +313,7 @@ static void iwl_bg_statistics_periodic(unsigned long data)
static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
u32 start_idx, u32 num_events,
- u32 mode)
+ u32 capacity, u32 mode)
{
u32 i;
u32 ptr; /* SRAM byte address of log data */
@@ -328,87 +326,125 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
/* Make sure device is powered up for SRAM reads */
- spin_lock_irqsave(&bus(priv)->reg_lock, reg_flags);
- if (iwl_grab_nic_access(bus(priv))) {
- spin_unlock_irqrestore(&bus(priv)->reg_lock, reg_flags);
+ spin_lock_irqsave(&trans(priv)->reg_lock, reg_flags);
+ if (unlikely(!iwl_grab_nic_access(trans(priv)))) {
+ spin_unlock_irqrestore(&trans(priv)->reg_lock, reg_flags);
return;
}
/* Set starting address; reads will auto-increment */
- iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, ptr);
- rmb();
+ iwl_write32(trans(priv), HBUS_TARG_MEM_RADDR, ptr);
+
+ /*
+ * Refuse to read more than would have fit into the log from
+ * the current start_idx. This used to happen due to the race
+ * described below, but now WARN because the code below should
+ * prevent it from happening here.
+ */
+ if (WARN_ON(num_events > capacity - start_idx))
+ num_events = capacity - start_idx;
/*
* "time" is actually "data" for mode 0 (no timestamp).
* place event id # at far right for easier visual parsing.
*/
for (i = 0; i < num_events; i++) {
- ev = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
- time = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+ ev = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
+ time = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
if (mode == 0) {
- trace_iwlwifi_dev_ucode_cont_event(priv,
- 0, time, ev);
+ trace_iwlwifi_dev_ucode_cont_event(
+ trans(priv)->dev, 0, time, ev);
} else {
- data = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
- trace_iwlwifi_dev_ucode_cont_event(priv,
- time, data, ev);
+ data = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
+ trace_iwlwifi_dev_ucode_cont_event(
+ trans(priv)->dev, time, data, ev);
}
}
/* Allow device to power down */
- iwl_release_nic_access(bus(priv));
- spin_unlock_irqrestore(&bus(priv)->reg_lock, reg_flags);
+ iwl_release_nic_access(trans(priv));
+ spin_unlock_irqrestore(&trans(priv)->reg_lock, reg_flags);
}
static void iwl_continuous_event_trace(struct iwl_priv *priv)
{
u32 capacity; /* event log capacity in # entries */
+ struct {
+ u32 capacity;
+ u32 mode;
+ u32 wrap_counter;
+ u32 write_counter;
+ } __packed read;
u32 base; /* SRAM byte address of event log header */
u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
u32 num_wraps; /* # times uCode wrapped to top of log */
u32 next_entry; /* index of next entry to be written by uCode */
- base = priv->shrd->device_pointers.error_event_table;
+ base = priv->shrd->device_pointers.log_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
- capacity = iwl_read_targ_mem(bus(priv), base);
- num_wraps = iwl_read_targ_mem(bus(priv),
- base + (2 * sizeof(u32)));
- mode = iwl_read_targ_mem(bus(priv), base + (1 * sizeof(u32)));
- next_entry = iwl_read_targ_mem(bus(priv),
- base + (3 * sizeof(u32)));
+ iwl_read_targ_mem_words(trans(priv), base, &read, sizeof(read));
+
+ capacity = read.capacity;
+ mode = read.mode;
+ num_wraps = read.wrap_counter;
+ next_entry = read.write_counter;
} else
return;
+ /*
+ * Unfortunately, the uCode doesn't use temporary variables.
+ * Therefore, it can happen that we read next_entry == capacity,
+ * which really means next_entry == 0.
+ */
+ if (unlikely(next_entry == capacity))
+ next_entry = 0;
+ /*
+ * Additionally, the uCode increases the write pointer before
+ * the wraps counter, so if the write pointer is smaller than
+ * the old write pointer (wrap occurred) but we read that no
+ * wrap occurred, we actually read between the next_entry and
+ * num_wraps update (this does happen in practice!!) -- take
+ * that into account by increasing num_wraps.
+ */
+ if (unlikely(next_entry < priv->event_log.next_entry &&
+ num_wraps == priv->event_log.num_wraps))
+ num_wraps++;
+
if (num_wraps == priv->event_log.num_wraps) {
- iwl_print_cont_event_trace(priv,
- base, priv->event_log.next_entry,
- next_entry - priv->event_log.next_entry,
- mode);
+ iwl_print_cont_event_trace(
+ priv, base, priv->event_log.next_entry,
+ next_entry - priv->event_log.next_entry,
+ capacity, mode);
+
priv->event_log.non_wraps_count++;
} else {
- if ((num_wraps - priv->event_log.num_wraps) > 1)
+ if (num_wraps - priv->event_log.num_wraps > 1)
priv->event_log.wraps_more_count++;
else
priv->event_log.wraps_once_count++;
- trace_iwlwifi_dev_ucode_wrap_event(priv,
+
+ trace_iwlwifi_dev_ucode_wrap_event(trans(priv)->dev,
num_wraps - priv->event_log.num_wraps,
next_entry, priv->event_log.next_entry);
+
if (next_entry < priv->event_log.next_entry) {
- iwl_print_cont_event_trace(priv, base,
- priv->event_log.next_entry,
- capacity - priv->event_log.next_entry,
- mode);
+ iwl_print_cont_event_trace(
+ priv, base, priv->event_log.next_entry,
+ capacity - priv->event_log.next_entry,
+ capacity, mode);
- iwl_print_cont_event_trace(priv, base, 0,
- next_entry, mode);
+ iwl_print_cont_event_trace(
+ priv, base, 0, next_entry, capacity, mode);
} else {
- iwl_print_cont_event_trace(priv, base,
- next_entry, capacity - next_entry,
- mode);
+ iwl_print_cont_event_trace(
+ priv, base, next_entry,
+ capacity - next_entry,
+ capacity, mode);
- iwl_print_cont_event_trace(priv, base, 0,
- next_entry, mode);
+ iwl_print_cont_event_trace(
+ priv, base, 0, next_entry, capacity, mode);
}
}
+
priv->event_log.num_wraps = num_wraps;
priv->event_log.next_entry = next_entry;
}
@@ -425,7 +461,7 @@ static void iwl_bg_ucode_trace(unsigned long data)
{
struct iwl_priv *priv = (struct iwl_priv *)data;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
if (priv->event_log.ucode_trace) {
@@ -441,11 +477,11 @@ static void iwl_bg_tx_flush(struct work_struct *work)
struct iwl_priv *priv =
container_of(work, struct iwl_priv, tx_flush);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
/* do nothing if rf-kill is on */
- if (!iwl_is_ready_rf(priv->shrd))
+ if (!iwl_is_ready_rf(priv))
return;
IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
@@ -475,6 +511,7 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
+ priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
BIT(NL80211_IFTYPE_ADHOC);
priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
@@ -509,620 +546,15 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
}
-static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
-
-#define UCODE_EXPERIMENTAL_INDEX 100
-#define UCODE_EXPERIMENTAL_TAG "exp"
-
-static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
-{
- const char *name_pre = cfg(priv)->fw_name_pre;
- char tag[8];
-
- if (first) {
-#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
- priv->fw_index = UCODE_EXPERIMENTAL_INDEX;
- strcpy(tag, UCODE_EXPERIMENTAL_TAG);
- } else if (priv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
-#endif
- priv->fw_index = cfg(priv)->ucode_api_max;
- sprintf(tag, "%d", priv->fw_index);
- } else {
- priv->fw_index--;
- sprintf(tag, "%d", priv->fw_index);
- }
-
- if (priv->fw_index < cfg(priv)->ucode_api_min) {
- IWL_ERR(priv, "no suitable firmware found!\n");
- return -ENOENT;
- }
-
- sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
-
- IWL_DEBUG_INFO(priv, "attempting to load firmware %s'%s'\n",
- (priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
- ? "EXPERIMENTAL " : "",
- priv->firmware_name);
-
- return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
- bus(priv)->dev,
- GFP_KERNEL, priv, iwl_ucode_callback);
-}
-
-struct iwlagn_firmware_pieces {
- const void *inst, *data, *init, *init_data, *wowlan_inst, *wowlan_data;
- size_t inst_size, data_size, init_size, init_data_size,
- wowlan_inst_size, wowlan_data_size;
-
- u32 build;
-
- u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
- u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
-};
-
-static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
- const struct firmware *ucode_raw,
- struct iwlagn_firmware_pieces *pieces)
-{
- struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
- u32 api_ver, hdr_size;
- const u8 *src;
-
- priv->ucode_ver = le32_to_cpu(ucode->ver);
- api_ver = IWL_UCODE_API(priv->ucode_ver);
-
- switch (api_ver) {
- default:
- hdr_size = 28;
- if (ucode_raw->size < hdr_size) {
- IWL_ERR(priv, "File size too small!\n");
- return -EINVAL;
- }
- pieces->build = le32_to_cpu(ucode->u.v2.build);
- pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size);
- pieces->data_size = le32_to_cpu(ucode->u.v2.data_size);
- pieces->init_size = le32_to_cpu(ucode->u.v2.init_size);
- pieces->init_data_size = le32_to_cpu(ucode->u.v2.init_data_size);
- src = ucode->u.v2.data;
- break;
- case 0:
- case 1:
- case 2:
- hdr_size = 24;
- if (ucode_raw->size < hdr_size) {
- IWL_ERR(priv, "File size too small!\n");
- return -EINVAL;
- }
- pieces->build = 0;
- pieces->inst_size = le32_to_cpu(ucode->u.v1.inst_size);
- pieces->data_size = le32_to_cpu(ucode->u.v1.data_size);
- pieces->init_size = le32_to_cpu(ucode->u.v1.init_size);
- pieces->init_data_size = le32_to_cpu(ucode->u.v1.init_data_size);
- src = ucode->u.v1.data;
- break;
- }
-
- /* Verify size of file vs. image size info in file's header */
- if (ucode_raw->size != hdr_size + pieces->inst_size +
- pieces->data_size + pieces->init_size +
- pieces->init_data_size) {
-
- IWL_ERR(priv,
- "uCode file size %d does not match expected size\n",
- (int)ucode_raw->size);
- return -EINVAL;
- }
-
- pieces->inst = src;
- src += pieces->inst_size;
- pieces->data = src;
- src += pieces->data_size;
- pieces->init = src;
- src += pieces->init_size;
- pieces->init_data = src;
- src += pieces->init_data_size;
-
- return 0;
-}
-
-static int iwlagn_load_firmware(struct iwl_priv *priv,
- const struct firmware *ucode_raw,
- struct iwlagn_firmware_pieces *pieces,
- struct iwlagn_ucode_capabilities *capa)
-{
- struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
- struct iwl_ucode_tlv *tlv;
- size_t len = ucode_raw->size;
- const u8 *data;
- int wanted_alternative = iwlagn_mod_params.wanted_ucode_alternative;
- int tmp;
- u64 alternatives;
- u32 tlv_len;
- enum iwl_ucode_tlv_type tlv_type;
- const u8 *tlv_data;
-
- if (len < sizeof(*ucode)) {
- IWL_ERR(priv, "uCode has invalid length: %zd\n", len);
- return -EINVAL;
- }
-
- if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) {
- IWL_ERR(priv, "invalid uCode magic: 0X%x\n",
- le32_to_cpu(ucode->magic));
- return -EINVAL;
- }
-
- /*
- * Check which alternatives are present, and "downgrade"
- * when the chosen alternative is not present, warning
- * the user when that happens. Some files may not have
- * any alternatives, so don't warn in that case.
- */
- alternatives = le64_to_cpu(ucode->alternatives);
- tmp = wanted_alternative;
- if (wanted_alternative > 63)
- wanted_alternative = 63;
- while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
- wanted_alternative--;
- if (wanted_alternative && wanted_alternative != tmp)
- IWL_WARN(priv,
- "uCode alternative %d not available, choosing %d\n",
- tmp, wanted_alternative);
-
- priv->ucode_ver = le32_to_cpu(ucode->ver);
- pieces->build = le32_to_cpu(ucode->build);
- data = ucode->data;
-
- len -= sizeof(*ucode);
-
- while (len >= sizeof(*tlv)) {
- u16 tlv_alt;
-
- len -= sizeof(*tlv);
- tlv = (void *)data;
-
- tlv_len = le32_to_cpu(tlv->length);
- tlv_type = le16_to_cpu(tlv->type);
- tlv_alt = le16_to_cpu(tlv->alternative);
- tlv_data = tlv->data;
-
- if (len < tlv_len) {
- IWL_ERR(priv, "invalid TLV len: %zd/%u\n",
- len, tlv_len);
- return -EINVAL;
- }
- len -= ALIGN(tlv_len, 4);
- data += sizeof(*tlv) + ALIGN(tlv_len, 4);
-
- /*
- * Alternative 0 is always valid.
- *
- * Skip alternative TLVs that are not selected.
- */
- if (tlv_alt != 0 && tlv_alt != wanted_alternative)
- continue;
-
- switch (tlv_type) {
- case IWL_UCODE_TLV_INST:
- pieces->inst = tlv_data;
- pieces->inst_size = tlv_len;
- break;
- case IWL_UCODE_TLV_DATA:
- pieces->data = tlv_data;
- pieces->data_size = tlv_len;
- break;
- case IWL_UCODE_TLV_INIT:
- pieces->init = tlv_data;
- pieces->init_size = tlv_len;
- break;
- case IWL_UCODE_TLV_INIT_DATA:
- pieces->init_data = tlv_data;
- pieces->init_data_size = tlv_len;
- break;
- case IWL_UCODE_TLV_BOOT:
- IWL_ERR(priv, "Found unexpected BOOT ucode\n");
- break;
- case IWL_UCODE_TLV_PROBE_MAX_LEN:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- capa->max_probe_length =
- le32_to_cpup((__le32 *)tlv_data);
- break;
- case IWL_UCODE_TLV_PAN:
- if (tlv_len)
- goto invalid_tlv_len;
- capa->flags |= IWL_UCODE_TLV_FLAGS_PAN;
- break;
- case IWL_UCODE_TLV_FLAGS:
- /* must be at least one u32 */
- if (tlv_len < sizeof(u32))
- goto invalid_tlv_len;
- /* and a proper number of u32s */
- if (tlv_len % sizeof(u32))
- goto invalid_tlv_len;
- /*
- * This driver only reads the first u32 as
- * right now no more features are defined,
- * if that changes then either the driver
- * will not work with the new firmware, or
- * it'll not take advantage of new features.
- */
- capa->flags = le32_to_cpup((__le32 *)tlv_data);
- break;
- case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- pieces->init_evtlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
- break;
- case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- pieces->init_evtlog_size =
- le32_to_cpup((__le32 *)tlv_data);
- break;
- case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- pieces->init_errlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
- break;
- case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- pieces->inst_evtlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
- break;
- case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- pieces->inst_evtlog_size =
- le32_to_cpup((__le32 *)tlv_data);
- break;
- case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- pieces->inst_errlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
- break;
- case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
- if (tlv_len)
- goto invalid_tlv_len;
- priv->enhance_sensitivity_table = true;
- break;
- case IWL_UCODE_TLV_WOWLAN_INST:
- pieces->wowlan_inst = tlv_data;
- pieces->wowlan_inst_size = tlv_len;
- break;
- case IWL_UCODE_TLV_WOWLAN_DATA:
- pieces->wowlan_data = tlv_data;
- pieces->wowlan_data_size = tlv_len;
- break;
- case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- capa->standard_phy_calibration_size =
- le32_to_cpup((__le32 *)tlv_data);
- break;
- default:
- IWL_DEBUG_INFO(priv, "unknown TLV: %d\n", tlv_type);
- break;
- }
- }
-
- if (len) {
- IWL_ERR(priv, "invalid TLV after parsing: %zd\n", len);
- iwl_print_hex_dump(priv, IWL_DL_FW, (u8 *)data, len);
- return -EINVAL;
- }
-
- return 0;
-
- invalid_tlv_len:
- IWL_ERR(priv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len);
- iwl_print_hex_dump(priv, IWL_DL_FW, tlv_data, tlv_len);
-
- return -EINVAL;
-}
-
-/**
- * iwl_ucode_callback - callback when firmware was loaded
- *
- * If loaded successfully, copies the firmware into buffers
- * for the card to fetch (via DMA).
- */
-static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
-{
- struct iwl_priv *priv = context;
- struct iwl_ucode_header *ucode;
- int err;
- struct iwlagn_firmware_pieces pieces;
- const unsigned int api_max = cfg(priv)->ucode_api_max;
- unsigned int api_ok = cfg(priv)->ucode_api_ok;
- const unsigned int api_min = cfg(priv)->ucode_api_min;
- u32 api_ver;
- char buildstr[25];
- u32 build;
- struct iwlagn_ucode_capabilities ucode_capa = {
- .max_probe_length = 200,
- .standard_phy_calibration_size =
- IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE,
- };
-
- if (!api_ok)
- api_ok = api_max;
-
- memset(&pieces, 0, sizeof(pieces));
-
- if (!ucode_raw) {
- if (priv->fw_index <= api_ok)
- IWL_ERR(priv,
- "request for firmware file '%s' failed.\n",
- priv->firmware_name);
- goto try_again;
- }
-
- IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
- priv->firmware_name, ucode_raw->size);
-
- /* Make sure that we got at least the API version number */
- if (ucode_raw->size < 4) {
- IWL_ERR(priv, "File size way too small!\n");
- goto try_again;
- }
-
- /* Data from ucode file: header followed by uCode images */
- ucode = (struct iwl_ucode_header *)ucode_raw->data;
-
- if (ucode->ver)
- err = iwlagn_load_legacy_firmware(priv, ucode_raw, &pieces);
- else
- err = iwlagn_load_firmware(priv, ucode_raw, &pieces,
- &ucode_capa);
-
- if (err)
- goto try_again;
-
- api_ver = IWL_UCODE_API(priv->ucode_ver);
- build = pieces.build;
-
- /*
- * api_ver should match the api version forming part of the
- * firmware filename ... but we don't check for that and only rely
- * on the API version read from firmware header from here on forward
- */
- /* no api version check required for experimental uCode */
- if (priv->fw_index != UCODE_EXPERIMENTAL_INDEX) {
- if (api_ver < api_min || api_ver > api_max) {
- IWL_ERR(priv,
- "Driver unable to support your firmware API. "
- "Driver supports v%u, firmware is v%u.\n",
- api_max, api_ver);
- goto try_again;
- }
-
- if (api_ver < api_ok) {
- if (api_ok != api_max)
- IWL_ERR(priv, "Firmware has old API version, "
- "expected v%u through v%u, got v%u.\n",
- api_ok, api_max, api_ver);
- else
- IWL_ERR(priv, "Firmware has old API version, "
- "expected v%u, got v%u.\n",
- api_max, api_ver);
- IWL_ERR(priv, "New firmware can be obtained from "
- "http://www.intellinuxwireless.org/.\n");
- }
- }
-
- if (build)
- sprintf(buildstr, " build %u%s", build,
- (priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
- ? " (EXP)" : "");
- else
- buildstr[0] = '\0';
-
- IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u%s\n",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver),
- buildstr);
-
- snprintf(priv->hw->wiphy->fw_version,
- sizeof(priv->hw->wiphy->fw_version),
- "%u.%u.%u.%u%s",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver),
- buildstr);
-
- /*
- * For any of the failures below (before allocating pci memory)
- * we will try to load a version with a smaller API -- maybe the
- * user just got a corrupted version of the latest API.
- */
-
- IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
- priv->ucode_ver);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
- pieces.inst_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
- pieces.data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
- pieces.init_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
- pieces.init_data_size);
-
- /* Verify that uCode images will fit in card's SRAM */
- if (pieces.inst_size > hw_params(priv).max_inst_size) {
- IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
- pieces.inst_size);
- goto try_again;
- }
-
- if (pieces.data_size > hw_params(priv).max_data_size) {
- IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
- pieces.data_size);
- goto try_again;
- }
-
- if (pieces.init_size > hw_params(priv).max_inst_size) {
- IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
- pieces.init_size);
- goto try_again;
- }
-
- if (pieces.init_data_size > hw_params(priv).max_data_size) {
- IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
- pieces.init_data_size);
- goto try_again;
- }
-
- /* Allocate ucode buffers for card's bus-master loading ... */
-
- /* Runtime instructions and 2 copies of data:
- * 1) unmodified from disk
- * 2) backup cache for save/restore during power-downs */
- if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.code,
- pieces.inst, pieces.inst_size))
- goto err_pci_alloc;
- if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.data,
- pieces.data, pieces.data_size))
- goto err_pci_alloc;
-
- /* Initialization instructions and data */
- if (pieces.init_size && pieces.init_data_size) {
- if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.code,
- pieces.init, pieces.init_size))
- goto err_pci_alloc;
- if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.data,
- pieces.init_data, pieces.init_data_size))
- goto err_pci_alloc;
- }
-
- /* WoWLAN instructions and data */
- if (pieces.wowlan_inst_size && pieces.wowlan_data_size) {
- if (iwl_alloc_fw_desc(bus(priv),
- &trans(priv)->ucode_wowlan.code,
- pieces.wowlan_inst,
- pieces.wowlan_inst_size))
- goto err_pci_alloc;
- if (iwl_alloc_fw_desc(bus(priv),
- &trans(priv)->ucode_wowlan.data,
- pieces.wowlan_data,
- pieces.wowlan_data_size))
- goto err_pci_alloc;
- }
-
- /* Now that we can no longer fail, copy information */
-
- /*
- * The (size - 16) / 12 formula is based on the information recorded
- * for each event, which is of mode 1 (including timestamp) for all
- * new microcodes that include this information.
- */
- priv->init_evtlog_ptr = pieces.init_evtlog_ptr;
- if (pieces.init_evtlog_size)
- priv->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
- else
- priv->init_evtlog_size =
- cfg(priv)->base_params->max_event_log_size;
- priv->init_errlog_ptr = pieces.init_errlog_ptr;
- priv->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
- if (pieces.inst_evtlog_size)
- priv->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
- else
- priv->inst_evtlog_size =
- cfg(priv)->base_params->max_event_log_size;
- priv->inst_errlog_ptr = pieces.inst_errlog_ptr;
-#ifndef CONFIG_IWLWIFI_P2P
- ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
-#endif
-
- priv->new_scan_threshold_behaviour =
- !!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
-
- if (!(cfg(priv)->sku & EEPROM_SKU_CAP_IPAN_ENABLE))
- ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
-
- /*
- * if not PAN, then don't support P2P -- might be a uCode
- * packaging bug or due to the eeprom check above
- */
- if (!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN))
- ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
-
- if (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN) {
- priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
- priv->shrd->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
- } else {
- priv->sta_key_max_num = STA_KEY_MAX_NUM;
- priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
- }
- /*
- * figure out the offset of chain noise reset and gain commands
- * base on the size of standard phy calibration commands table size
- */
- if (ucode_capa.standard_phy_calibration_size >
- IWL_MAX_PHY_CALIBRATE_TBL_SIZE)
- ucode_capa.standard_phy_calibration_size =
- IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
-
- priv->phy_calib_chain_noise_reset_cmd =
- ucode_capa.standard_phy_calibration_size;
- priv->phy_calib_chain_noise_gain_cmd =
- ucode_capa.standard_phy_calibration_size + 1;
-
- /* initialize all valid contexts */
- iwl_init_context(priv, ucode_capa.flags);
-
- /**************************************************
- * This is still part of probe() in a sense...
- *
- * 9. Setup and register with mac80211 and debugfs
- **************************************************/
- err = iwlagn_mac_setup_register(priv, &ucode_capa);
- if (err)
- goto out_unbind;
-
- err = iwl_dbgfs_register(priv, DRV_NAME);
- if (err)
- IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
-
- /* We have our copies now, allow OS release its copies */
- release_firmware(ucode_raw);
- complete(&priv->firmware_loading_complete);
- return;
-
- try_again:
- /* try next, if any */
- if (iwl_request_firmware(priv, false))
- goto out_unbind;
- release_firmware(ucode_raw);
- return;
-
- err_pci_alloc:
- IWL_ERR(priv, "failed to allocate pci memory\n");
- iwl_dealloc_ucode(trans(priv));
- out_unbind:
- complete(&priv->firmware_loading_complete);
- device_release_driver(bus(priv)->dev);
- release_firmware(ucode_raw);
-}
-
static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
{
struct iwl_ct_kill_config cmd;
struct iwl_ct_kill_throttling_config adv_cmd;
- unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&priv->shrd->lock, flags);
- iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
+ iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+
priv->thermal_throttle.ct_kill_toggle = false;
if (cfg(priv)->base_params->support_ct_kill_exit) {
@@ -1131,7 +563,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
adv_cmd.critical_temperature_exit =
cpu_to_le32(hw_params(priv).ct_kill_exit_threshold);
- ret = iwl_trans_send_cmd_pdu(trans(priv),
+ ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_CT_KILL_CONFIG_CMD,
CMD_SYNC, sizeof(adv_cmd), &adv_cmd);
if (ret)
@@ -1146,7 +578,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
cmd.critical_temperature_R =
cpu_to_le32(hw_params(priv).ct_kill_threshold);
- ret = iwl_trans_send_cmd_pdu(trans(priv),
+ ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_CT_KILL_CONFIG_CMD,
CMD_SYNC, sizeof(cmd), &cmd);
if (ret)
@@ -1172,7 +604,7 @@ static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_RT_CFG_ALL;
calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg);
- return iwl_trans_send_cmd(trans(priv), &cmd);
+ return iwl_dvm_send_cmd(priv, &cmd);
}
@@ -1182,9 +614,9 @@ static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
.valid = cpu_to_le32(valid_tx_ant),
};
- if (IWL_UCODE_API(priv->ucode_ver) > 1) {
+ if (IWL_UCODE_API(priv->fw->ucode_ver) > 1) {
IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
- return iwl_trans_send_cmd_pdu(trans(priv),
+ return iwl_dvm_send_cmd_pdu(priv,
TX_ANT_CONFIGURATION_CMD,
CMD_SYNC,
sizeof(struct iwl_tx_ant_config_cmd),
@@ -1205,20 +637,22 @@ int iwl_alive_start(struct iwl_priv *priv)
int ret = 0;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- /*TODO: this should go to the transport layer */
- iwl_reset_ict(trans(priv));
-
IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
/* After the ALIVE response, we can send host commands to the uCode */
- set_bit(STATUS_ALIVE, &priv->shrd->status);
+ set_bit(STATUS_ALIVE, &priv->status);
/* Enable watchdog to monitor the driver tx queues */
iwl_setup_watchdog(priv);
- if (iwl_is_rfkill(priv->shrd))
+ if (iwl_is_rfkill(priv))
return -ERFKILL;
+ if (priv->event_log.ucode_trace) {
+ /* start collecting data now */
+ mod_timer(&priv->ucode_trace, jiffies);
+ }
+
/* download priority table before any calibration request */
if (cfg(priv)->bt_params &&
cfg(priv)->bt_params->advanced_bt_coexist) {
@@ -1235,14 +669,14 @@ int iwl_alive_start(struct iwl_priv *priv)
priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
priv->cur_rssi_ctx = NULL;
- iwl_send_prio_tbl(trans(priv));
+ iwl_send_prio_tbl(priv);
/* FIXME: w/a to force change uCode BT state machine */
- ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_OPEN,
+ ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
- ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_CLOSE,
+ ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
@@ -1263,9 +697,9 @@ int iwl_alive_start(struct iwl_priv *priv)
priv->active_rate = IWL_RATES_MASK;
/* Configure Tx antenna selection based on H/W config */
- iwlagn_send_tx_ant_config(priv, cfg(priv)->valid_tx_ant);
+ iwlagn_send_tx_ant_config(priv, hw_params(priv).valid_tx_ant);
- if (iwl_is_associated_ctx(ctx) && !priv->shrd->wowlan) {
+ if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
struct iwl_rxon_cmd *active_rxon =
(struct iwl_rxon_cmd *)&ctx->active;
/* apply any changes in staging */
@@ -1280,12 +714,12 @@ int iwl_alive_start(struct iwl_priv *priv)
iwlagn_set_rxon_chain(priv, ctx);
}
- if (!priv->shrd->wowlan) {
+ if (!priv->wowlan) {
/* WoWLAN ucode will not reply in the same way, skip it */
iwl_reset_run_time_calib(priv);
}
- set_bit(STATUS_READY, &priv->shrd->status);
+ set_bit(STATUS_READY, &priv->status);
/* Configure the adapter for unassociated operation */
ret = iwlagn_commit_rxon(priv, ctx);
@@ -1300,14 +734,48 @@ int iwl_alive_start(struct iwl_priv *priv)
return iwl_power_update_mode(priv, true);
}
-static void iwl_cancel_deferred_work(struct iwl_priv *priv);
+/**
+ * iwl_clear_driver_stations - clear knowledge of all stations from driver
+ * @priv: iwl priv struct
+ *
+ * This is called during iwl_down() to make sure that in the case
+ * we're coming there from a hardware restart mac80211 will be
+ * able to reconfigure stations -- if we're getting there in the
+ * normal down flow then the stations will already be cleared.
+ */
+static void iwl_clear_driver_stations(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx;
+
+ spin_lock_bh(&priv->sta_lock);
+ memset(priv->stations, 0, sizeof(priv->stations));
+ priv->num_stations = 0;
-void __iwl_down(struct iwl_priv *priv)
+ priv->ucode_key_table = 0;
+
+ for_each_context(priv, ctx) {
+ /*
+ * Remove all key information that is not stored as part
+ * of station information since mac80211 may not have had
+ * a chance to remove all the keys. When device is
+ * reconfigured by mac80211 after an error all keys will
+ * be reconfigured.
+ */
+ memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
+ ctx->key_mapping_keys = 0;
+ }
+
+ spin_unlock_bh(&priv->sta_lock);
+}
+
+void iwl_down(struct iwl_priv *priv)
{
int exit_pending;
IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
+ lockdep_assert_held(&priv->mutex);
+
iwl_scan_cancel_timeout(priv, 200);
/*
@@ -1318,7 +786,7 @@ void __iwl_down(struct iwl_priv *priv)
ieee80211_remain_on_channel_expired(priv->hw);
exit_pending =
- test_and_set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+ test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
* to prevent rearm timer */
@@ -1343,37 +811,29 @@ void __iwl_down(struct iwl_priv *priv)
/* Wipe out the EXIT_PENDING status bit if we are not actually
* exiting the module */
if (!exit_pending)
- clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+ clear_bit(STATUS_EXIT_PENDING, &priv->status);
if (priv->mac80211_registered)
ieee80211_stop_queues(priv->hw);
+ priv->ucode_loaded = false;
iwl_trans_stop_device(trans(priv));
/* Clear out all status bits but a few that are stable across reset */
- priv->shrd->status &=
- test_bit(STATUS_RF_KILL_HW, &priv->shrd->status) <<
+ priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status) <<
+ test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
STATUS_GEO_CONFIGURED |
- test_bit(STATUS_FW_ERROR, &priv->shrd->status) <<
- STATUS_FW_ERROR |
- test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) <<
+ test_bit(STATUS_EXIT_PENDING, &priv->status) <<
STATUS_EXIT_PENDING;
+ priv->shrd->status &=
+ test_bit(STATUS_FW_ERROR, &priv->shrd->status) <<
+ STATUS_FW_ERROR;
dev_kfree_skb(priv->beacon_skb);
priv->beacon_skb = NULL;
}
-void iwl_down(struct iwl_priv *priv)
-{
- mutex_lock(&priv->shrd->mutex);
- __iwl_down(priv);
- mutex_unlock(&priv->shrd->mutex);
-
- iwl_cancel_deferred_work(priv);
-}
-
/*****************************************************************************
*
* Workqueue callbacks
@@ -1385,11 +845,11 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
struct iwl_priv *priv = container_of(work, struct iwl_priv,
run_time_calib_work);
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
- test_bit(STATUS_SCANNING, &priv->shrd->status)) {
- mutex_unlock(&priv->shrd->mutex);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+ test_bit(STATUS_SCANNING, &priv->status)) {
+ mutex_unlock(&priv->mutex);
return;
}
@@ -1398,7 +858,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
iwl_sensitivity_calibration(priv);
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
void iwlagn_prepare_restart(struct iwl_priv *priv)
@@ -1410,7 +870,7 @@ void iwlagn_prepare_restart(struct iwl_priv *priv)
u8 bt_status;
bool bt_is_sco;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
for_each_context(priv, ctx)
ctx->vif = NULL;
@@ -1431,7 +891,7 @@ void iwlagn_prepare_restart(struct iwl_priv *priv)
bt_status = priv->bt_status;
bt_is_sco = priv->bt_is_sco;
- __iwl_down(priv);
+ iwl_down(priv);
priv->bt_full_concurrent = bt_full_concurrent;
priv->bt_ci_compliance = bt_ci_compliance;
@@ -1444,13 +904,13 @@ static void iwl_bg_restart(struct work_struct *data)
{
struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
if (test_and_clear_bit(STATUS_FW_ERROR, &priv->shrd->status)) {
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwlagn_prepare_restart(priv);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
iwl_cancel_deferred_work(priv);
ieee80211_restart_hw(priv->hw);
} else {
@@ -1465,7 +925,7 @@ void iwlagn_disable_roc(struct iwl_priv *priv)
{
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
if (!priv->hw_roc_setup)
return;
@@ -1488,9 +948,9 @@ static void iwlagn_disable_roc_work(struct work_struct *work)
struct iwl_priv *priv = container_of(work, struct iwl_priv,
hw_roc_disable_work.work);
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwlagn_disable_roc(priv);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
/*****************************************************************************
@@ -1501,9 +961,7 @@ static void iwlagn_disable_roc_work(struct work_struct *work)
static void iwl_setup_deferred_work(struct iwl_priv *priv)
{
- priv->shrd->workqueue = create_singlethread_workqueue(DRV_NAME);
-
- init_waitqueue_head(&priv->shrd->wait_command_queue);
+ priv->workqueue = create_singlethread_workqueue(DRV_NAME);
INIT_WORK(&priv->restart, iwl_bg_restart);
INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
@@ -1516,8 +974,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
iwl_setup_scan_deferred_work(priv);
- if (cfg(priv)->lib->bt_setup_deferred_work)
- cfg(priv)->lib->bt_setup_deferred_work(priv);
+ if (cfg(priv)->bt_params)
+ iwlagn_bt_setup_deferred_work(priv);
init_timer(&priv->statistics_periodic);
priv->statistics_periodic.data = (unsigned long)priv;
@@ -1532,10 +990,10 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
priv->watchdog.function = iwl_bg_watchdog;
}
-static void iwl_cancel_deferred_work(struct iwl_priv *priv)
+void iwl_cancel_deferred_work(struct iwl_priv *priv)
{
- if (cfg(priv)->lib->cancel_deferred_work)
- cfg(priv)->lib->cancel_deferred_work(priv);
+ if (cfg(priv)->bt_params)
+ iwlagn_bt_cancel_deferred_work(priv);
cancel_work_sync(&priv->run_time_calib_work);
cancel_work_sync(&priv->beacon_update);
@@ -1550,8 +1008,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
del_timer_sync(&priv->ucode_trace);
}
-static void iwl_init_hw_rates(struct iwl_priv *priv,
- struct ieee80211_rate *rates)
+static void iwl_init_hw_rates(struct ieee80211_rate *rates)
{
int i;
@@ -1575,21 +1032,26 @@ static int iwl_init_drv(struct iwl_priv *priv)
{
int ret;
- spin_lock_init(&priv->shrd->sta_lock);
+ spin_lock_init(&priv->sta_lock);
- mutex_init(&priv->shrd->mutex);
+ mutex_init(&priv->mutex);
- INIT_LIST_HEAD(&trans(priv)->calib_results);
+ INIT_LIST_HEAD(&priv->calib_results);
priv->ieee_channels = NULL;
priv->ieee_rates = NULL;
priv->band = IEEE80211_BAND_2GHZ;
+ priv->plcp_delta_threshold =
+ cfg(priv)->base_params->plcp_delta_threshold;
+
priv->iw_mode = NL80211_IFTYPE_STATION;
priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
priv->agg_tids_count = 0;
+ priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
+
/* initialize force reset */
priv->force_reset[IWL_RF_RESET].reset_duration =
IWL_DELAY_NEXT_FORCE_RF_RESET;
@@ -1625,7 +1087,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
IWL_ERR(priv, "initializing geos failed: %d\n", ret);
goto err_free_channel_map;
}
- iwl_init_hw_rates(priv, priv->ieee_rates);
+ iwl_init_hw_rates(priv->ieee_rates);
return 0;
@@ -1639,29 +1101,25 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
{
iwl_free_geos(priv);
iwl_free_channel_map(priv);
- if (priv->tx_cmd_pool)
- kmem_cache_destroy(priv->tx_cmd_pool);
kfree(priv->scan_cmd);
kfree(priv->beacon_cmd);
kfree(rcu_dereference_raw(priv->noa_data));
+ iwl_calib_free_results(priv);
#ifdef CONFIG_IWLWIFI_DEBUGFS
kfree(priv->wowlan_sram);
#endif
}
-
-
-static u32 iwl_hw_detect(struct iwl_priv *priv)
-{
- return iwl_read32(bus(priv), CSR_HW_REV);
-}
-
/* Size of one Rx buffer in host DRAM */
#define IWL_RX_BUF_SIZE_4K (4 * 1024)
#define IWL_RX_BUF_SIZE_8K (8 * 1024)
-static int iwl_set_hw_params(struct iwl_priv *priv)
+static void iwl_set_hw_params(struct iwl_priv *priv)
{
+ if (cfg(priv)->ht_params)
+ hw_params(priv).use_rts_for_aggregation =
+ cfg(priv)->ht_params->use_rts_for_aggregation;
+
if (iwlagn_mod_params.amsdu_size_8K)
hw_params(priv).rx_page_order =
get_order(IWL_RX_BUF_SIZE_8K);
@@ -1670,49 +1128,46 @@ static int iwl_set_hw_params(struct iwl_priv *priv)
get_order(IWL_RX_BUF_SIZE_4K);
if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
- cfg(priv)->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
+ hw_params(priv).sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
hw_params(priv).num_ampdu_queues =
cfg(priv)->base_params->num_of_ampdu_queues;
- hw_params(priv).shadow_reg_enable =
- cfg(priv)->base_params->shadow_reg_enable;
- hw_params(priv).sku = cfg(priv)->sku;
hw_params(priv).wd_timeout = cfg(priv)->base_params->wd_timeout;
/* Device-specific setup */
- return cfg(priv)->lib->set_hw_params(priv);
+ cfg(priv)->lib->set_hw_params(priv);
}
static void iwl_debug_config(struct iwl_priv *priv)
{
- dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUG "
+ dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEBUG "
#ifdef CONFIG_IWLWIFI_DEBUG
"enabled\n");
#else
"disabled\n");
#endif
- dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS "
+ dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS "
#ifdef CONFIG_IWLWIFI_DEBUGFS
"enabled\n");
#else
"disabled\n");
#endif
- dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING "
+ dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING "
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
"enabled\n");
#else
"disabled\n");
#endif
- dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE "
+ dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE "
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
"enabled\n");
#else
"disabled\n");
#endif
- dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_P2P "
+ dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_P2P "
#ifdef CONFIG_IWLWIFI_P2P
"enabled\n");
#else
@@ -1720,46 +1175,77 @@ static void iwl_debug_config(struct iwl_priv *priv)
#endif
}
-int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
- struct iwl_cfg *cfg)
+static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
+ const struct iwl_fw *fw)
{
int err = 0;
struct iwl_priv *priv;
struct ieee80211_hw *hw;
+ struct iwl_op_mode *op_mode;
u16 num_mac;
- u32 hw_rev;
+ u32 ucode_flags;
+ struct iwl_trans_config trans_cfg;
+ static const u8 no_reclaim_cmds[] = {
+ REPLY_RX_PHY_CMD,
+ REPLY_RX,
+ REPLY_RX_MPDU_CMD,
+ REPLY_COMPRESSED_BA,
+ STATISTICS_NOTIFICATION,
+ REPLY_TX,
+ };
/************************
* 1. Allocating HW data
************************/
hw = iwl_alloc_all();
if (!hw) {
- pr_err("%s: Cannot allocate network device\n", cfg->name);
+ pr_err("%s: Cannot allocate network device\n",
+ cfg(trans)->name);
err = -ENOMEM;
goto out;
}
- priv = hw->priv;
- priv->shrd = &priv->_shrd;
- bus->shrd = priv->shrd;
- priv->shrd->bus = bus;
- priv->shrd->priv = priv;
+ op_mode = hw->priv;
+ op_mode->ops = &iwl_dvm_ops;
+ priv = IWL_OP_MODE_GET_DVM(op_mode);
+ priv->shrd = trans->shrd;
+ priv->fw = fw;
+ /* TODO: remove fw from shared data later */
+ priv->shrd->fw = fw;
- priv->shrd->trans = trans_ops->alloc(priv->shrd);
- if (priv->shrd->trans == NULL) {
- err = -ENOMEM;
- goto out_free_traffic_mem;
+ /*
+ * Populate the state variables that the transport layer needs
+ * to know about.
+ */
+ trans_cfg.op_mode = op_mode;
+ trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
+ trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+
+ ucode_flags = fw->ucode_capa.flags;
+
+#ifndef CONFIG_IWLWIFI_P2P
+ ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
+#endif
+
+ if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
+ priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
+ trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
+ } else {
+ priv->sta_key_max_num = STA_KEY_MAX_NUM;
+ trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
}
+ /* Configure transport layer */
+ iwl_trans_configure(trans(priv), &trans_cfg);
+
/* At this point both hw and priv are allocated. */
- SET_IEEE80211_DEV(hw, bus(priv)->dev);
+ SET_IEEE80211_DEV(priv->hw, trans(priv)->dev);
- /* what debugging capabilities we have */
+ /* show what debugging capabilities we have */
iwl_debug_config(priv);
IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
- cfg(priv) = cfg;
/* is antenna coupling more than 35dB ? */
priv->bt_ant_couple_ok =
@@ -1778,47 +1264,34 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
/* these spin locks will be used in apm_ops.init and EEPROM access
* we should init now
*/
- spin_lock_init(&bus(priv)->reg_lock);
- spin_lock_init(&priv->shrd->lock);
-
- /*
- * stop and reset the on-board processor just in case it is in a
- * strange state ... like being left stranded by a primary kernel
- * and this is now the kdump kernel trying to start up
- */
- iwl_write32(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+ spin_lock_init(&trans(priv)->reg_lock);
+ spin_lock_init(&priv->statistics.lock);
/***********************
- * 3. Read REV register
+ * 2. Read REV register
***********************/
- hw_rev = iwl_hw_detect(priv);
IWL_INFO(priv, "Detected %s, REV=0x%X\n",
- cfg(priv)->name, hw_rev);
+ cfg(priv)->name, trans(priv)->hw_rev);
- err = iwl_trans_request_irq(trans(priv));
+ err = iwl_trans_start_hw(trans(priv));
if (err)
- goto out_free_trans;
-
- if (iwl_trans_prepare_card_hw(trans(priv))) {
- err = -EIO;
- IWL_WARN(priv, "Failed, HW not ready\n");
- goto out_free_trans;
- }
+ goto out_free_traffic_mem;
/*****************
- * 4. Read EEPROM
+ * 3. Read EEPROM
*****************/
- /* Read the EEPROM */
- err = iwl_eeprom_init(priv, hw_rev);
+ err = iwl_eeprom_init(trans(priv), trans(priv)->hw_rev);
+ /* Reset chip to save power until we load uCode during "up". */
+ iwl_trans_stop_hw(trans(priv));
if (err) {
IWL_ERR(priv, "Unable to init EEPROM\n");
- goto out_free_trans;
+ goto out_free_traffic_mem;
}
err = iwl_eeprom_check_version(priv);
if (err)
goto out_free_eeprom;
- err = iwl_eeprom_check_sku(priv);
+ err = iwl_eeprom_init_hw_params(priv);
if (err)
goto out_free_eeprom;
@@ -1836,16 +1309,27 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
}
/************************
- * 5. Setup HW constants
+ * 4. Setup HW constants
************************/
- if (iwl_set_hw_params(priv)) {
- err = -ENOENT;
- IWL_ERR(priv, "failed to set hw parameters\n");
- goto out_free_eeprom;
+ iwl_set_hw_params(priv);
+
+ if (!(hw_params(priv).sku & EEPROM_SKU_CAP_IPAN_ENABLE)) {
+ IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN");
+ ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
+ /*
+ * if not PAN, then don't support P2P -- might be a uCode
+ * packaging bug or due to the eeprom check above
+ */
+ ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
+ priv->sta_key_max_num = STA_KEY_MAX_NUM;
+ trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+
+ /* Configure transport layer again */
+ iwl_trans_configure(trans(priv), &trans_cfg);
}
/*******************
- * 6. Setup priv
+ * 5. Setup priv
*******************/
err = iwl_init_drv(priv);
@@ -1854,92 +1338,90 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
/* At this point both hw and priv are initialized. */
/********************
- * 7. Setup services
+ * 6. Setup services
********************/
iwl_setup_deferred_work(priv);
iwl_setup_rx_handlers(priv);
iwl_testmode_init(priv);
- /*********************************************
- * 8. Enable interrupts
- *********************************************/
-
- iwl_enable_rfkill_int(priv);
+ iwl_power_initialize(priv);
+ iwl_tt_initialize(priv);
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(bus(priv),
- CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
- else
- set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
+ snprintf(priv->hw->wiphy->fw_version,
+ sizeof(priv->hw->wiphy->fw_version),
+ "%s", fw->fw_version);
- wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
+ priv->new_scan_threshold_behaviour =
+ !!(ucode_flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
- iwl_power_initialize(priv);
- iwl_tt_initialize(priv);
+ priv->phy_calib_chain_noise_reset_cmd =
+ fw->ucode_capa.standard_phy_calibration_size;
+ priv->phy_calib_chain_noise_gain_cmd =
+ fw->ucode_capa.standard_phy_calibration_size + 1;
- init_completion(&priv->firmware_loading_complete);
+ /* initialize all valid contexts */
+ iwl_init_context(priv, ucode_flags);
- err = iwl_request_firmware(priv, true);
+ /**************************************************
+ * This is still part of probe() in a sense...
+ *
+ * 7. Setup and register with mac80211 and debugfs
+ **************************************************/
+ err = iwlagn_mac_setup_register(priv, &fw->ucode_capa);
if (err)
goto out_destroy_workqueue;
- return 0;
+ err = iwl_dbgfs_register(priv, DRV_NAME);
+ if (err)
+ IWL_ERR(priv,
+ "failed to create debugfs files. Ignoring error: %d\n",
+ err);
+
+ return op_mode;
out_destroy_workqueue:
- destroy_workqueue(priv->shrd->workqueue);
- priv->shrd->workqueue = NULL;
+ destroy_workqueue(priv->workqueue);
+ priv->workqueue = NULL;
iwl_uninit_drv(priv);
out_free_eeprom:
iwl_eeprom_free(priv->shrd);
-out_free_trans:
- iwl_trans_free(trans(priv));
out_free_traffic_mem:
iwl_free_traffic_mem(priv);
ieee80211_free_hw(priv->hw);
out:
- return err;
+ op_mode = NULL;
+ return op_mode;
}
-void __devexit iwl_remove(struct iwl_priv * priv)
+static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
{
- wait_for_completion(&priv->firmware_loading_complete);
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
iwl_dbgfs_unregister(priv);
- /* ieee80211_unregister_hw call wil cause iwlagn_mac_stop to
- * to be called and iwl_down since we are removing the device
- * we need to set STATUS_EXIT_PENDING bit.
- */
- set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
-
iwl_testmode_cleanup(priv);
iwlagn_mac_unregister(priv);
iwl_tt_exit(priv);
/*This will stop the queues, move the device to low power state */
+ priv->ucode_loaded = false;
iwl_trans_stop_device(trans(priv));
- iwl_dealloc_ucode(trans(priv));
-
iwl_eeprom_free(priv->shrd);
/*netif_stop_queue(dev); */
- flush_workqueue(priv->shrd->workqueue);
+ flush_workqueue(priv->workqueue);
/* ieee80211_unregister_hw calls iwlagn_mac_stop, which flushes
- * priv->shrd->workqueue... so we can't take down the workqueue
+ * priv->workqueue... so we can't take down the workqueue
* until now... */
- destroy_workqueue(priv->shrd->workqueue);
- priv->shrd->workqueue = NULL;
+ destroy_workqueue(priv->workqueue);
+ priv->workqueue = NULL;
iwl_free_traffic_mem(priv);
- iwl_trans_free(trans(priv));
-
iwl_uninit_drv(priv);
dev_kfree_skb(priv->beacon_skb);
@@ -1947,12 +1429,81 @@ void __devexit iwl_remove(struct iwl_priv * priv)
ieee80211_free_hw(priv->hw);
}
+static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+ if (!iwl_check_for_ct_kill(priv)) {
+ IWL_ERR(priv, "Restarting adapter queue is full\n");
+ iwl_nic_error(op_mode);
+ }
+}
+
+static void iwl_nic_config(struct iwl_op_mode *op_mode)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+ cfg(priv)->lib->nic_config(priv);
+}
+
+static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, u8 ac)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+ set_bit(ac, &priv->transport_queue_stop);
+ ieee80211_stop_queue(priv->hw, ac);
+}
+
+static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, u8 ac)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+ clear_bit(ac, &priv->transport_queue_stop);
+
+ if (!priv->passive_no_rx)
+ ieee80211_wake_queue(priv->hw, ac);
+}
+
+void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
+{
+ int ac;
+
+ if (!priv->passive_no_rx)
+ return;
+
+ for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++) {
+ if (!test_bit(ac, &priv->transport_queue_stop)) {
+ IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d");
+ ieee80211_wake_queue(priv->hw, ac);
+ } else {
+ IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d");
+ }
+ }
+
+ priv->passive_no_rx = false;
+}
+
+const struct iwl_op_mode_ops iwl_dvm_ops = {
+ .start = iwl_op_mode_dvm_start,
+ .stop = iwl_op_mode_dvm_stop,
+ .rx = iwl_rx_dispatch,
+ .queue_full = iwl_stop_sw_queue,
+ .queue_not_full = iwl_wake_sw_queue,
+ .hw_rf_kill = iwl_set_hw_rfkill_state,
+ .free_skb = iwl_free_skb,
+ .nic_error = iwl_nic_error,
+ .cmd_queue_full = iwl_cmd_queue_full,
+ .nic_config = iwl_nic_config,
+};
/*****************************************************************************
*
* driver and module entry point
*
*****************************************************************************/
+
+struct kmem_cache *iwl_tx_cmd_pool;
+
static int __init iwl_init(void)
{
@@ -1960,20 +1511,27 @@ static int __init iwl_init(void)
pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
pr_info(DRV_COPYRIGHT "\n");
+ iwl_tx_cmd_pool = kmem_cache_create("iwl_dev_cmd",
+ sizeof(struct iwl_device_cmd),
+ sizeof(void *), 0, NULL);
+ if (!iwl_tx_cmd_pool)
+ return -ENOMEM;
+
ret = iwlagn_rate_control_register();
if (ret) {
pr_err("Unable to register rate control algorithm: %d\n", ret);
- return ret;
+ goto error_rc_register;
}
ret = iwl_pci_register_driver();
-
if (ret)
- goto error_register;
+ goto error_pci_register;
return ret;
-error_register:
+error_pci_register:
iwlagn_rate_control_unregister();
+error_rc_register:
+ kmem_cache_destroy(iwl_tx_cmd_pool);
return ret;
}
@@ -1981,6 +1539,7 @@ static void __exit iwl_exit(void)
{
iwl_pci_unregister_driver();
iwlagn_rate_control_unregister();
+ kmem_cache_destroy(iwl_tx_cmd_pool);
}
module_exit(iwl_exit);
@@ -1994,8 +1553,6 @@ MODULE_PARM_DESC(debug, "debug output mask");
module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
-module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
-MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, iwlagn_mod_params.disable_11n, uint, S_IRUGO);
MODULE_PARM_DESC(11n_disable,
"disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
@@ -2054,7 +1611,7 @@ MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)");
module_param_named(led_mode, iwlagn_mod_params.led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode, "0=system default, "
- "1=On(RF On)/Off(RF Off), 2=blinking (default: 0)");
+ "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)");
module_param_named(power_save, iwlagn_mod_params.power_save,
bool, S_IRUGO);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index f84fb3c53563..3780a03f2716 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -65,16 +65,10 @@
#include "iwl-dev.h"
-struct iwlagn_ucode_capabilities {
- u32 max_probe_length;
- u32 standard_phy_calibration_size;
- u32 flags;
-};
+struct iwl_ucode_capabilities;
extern struct ieee80211_ops iwlagn_hw_ops;
-int iwl_reset_ict(struct iwl_trans *trans);
-
static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
{
hdr->op_code = cmd;
@@ -83,16 +77,31 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
hdr->data_valid = 1;
}
-void __iwl_down(struct iwl_priv *priv);
void iwl_down(struct iwl_priv *priv);
+void iwl_cancel_deferred_work(struct iwl_priv *priv);
void iwlagn_prepare_restart(struct iwl_priv *priv);
+void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb);
+int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state);
+void iwl_nic_error(struct iwl_op_mode *op_mode);
+
+bool iwl_check_for_ct_kill(struct iwl_priv *priv);
+
+void iwlagn_lift_passive_no_rx(struct iwl_priv *priv);
/* MAC80211 */
struct ieee80211_hw *iwl_alloc_all(void);
int iwlagn_mac_setup_register(struct iwl_priv *priv,
- struct iwlagn_ucode_capabilities *capa);
+ const struct iwl_ucode_capabilities *capa);
void iwlagn_mac_unregister(struct iwl_priv *priv);
+/* commands */
+int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
+int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
+ u32 flags, u16 len, const void *data);
+
/* RXON */
int iwlagn_set_pan_params(struct iwl_priv *priv);
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -107,8 +116,18 @@ void iwlagn_config_ht40(struct ieee80211_conf *conf,
/* uCode */
int iwlagn_rx_calib_result(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
+void iwl_send_prio_tbl(struct iwl_priv *priv);
+int iwl_init_alive_start(struct iwl_priv *priv);
+int iwl_run_init_ucode(struct iwl_priv *priv);
+int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
+ enum iwl_ucode_type ucode_type);
+int iwl_send_calib_results(struct iwl_priv *priv);
+int iwl_calib_set(struct iwl_priv *priv,
+ const struct iwl_calib_hdr *cmd, int len);
+void iwl_calib_free_results(struct iwl_priv *priv);
/* lib */
int iwlagn_send_tx_power(struct iwl_priv *priv);
@@ -120,8 +139,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
#ifdef CONFIG_PM_SLEEP
int iwlagn_send_patterns(struct iwl_priv *priv,
struct cfg80211_wowlan *wowlan);
-int iwlagn_suspend(struct iwl_priv *priv,
- struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan);
#endif
/* rx */
@@ -138,9 +156,9 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
static inline u32 iwl_tx_status_to_mac80211(u32 status)
@@ -175,7 +193,7 @@ void iwlagn_disable_roc(struct iwl_priv *priv);
/* bt coex */
void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
@@ -216,6 +234,8 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct ieee80211_sta *sta, u8 *sta_id_r);
int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
const u8 *addr);
+void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr);
u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
@@ -223,46 +243,12 @@ void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
u8 sta_id, struct iwl_link_quality_cmd *link_cmd);
int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct iwl_link_quality_cmd *lq, u8 flags, bool init);
-void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
+int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ struct ieee80211_sta *sta);
-/**
- * iwl_clear_driver_stations - clear knowledge of all stations from driver
- * @priv: iwl priv struct
- *
- * This is called during iwl_down() to make sure that in the case
- * we're coming there from a hardware restart mac80211 will be
- * able to reconfigure stations -- if we're getting there in the
- * normal down flow then the stations will already be cleared.
- */
-static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rxon_context *ctx;
-
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- memset(priv->stations, 0, sizeof(priv->stations));
- priv->num_stations = 0;
-
- priv->ucode_key_table = 0;
-
- for_each_context(priv, ctx) {
- /*
- * Remove all key information that is not stored as part
- * of station information since mac80211 may not have had
- * a chance to remove all the keys. When device is
- * reconfigured by mac80211 after an error all keys will
- * be reconfigured.
- */
- memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
- ctx->key_mapping_keys = 0;
- }
-
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
-}
-
static inline int iwl_sta_id(struct ieee80211_sta *sta)
{
if (WARN_ON(!sta))
@@ -271,37 +257,6 @@ static inline int iwl_sta_id(struct ieee80211_sta *sta)
return ((struct iwl_station_priv *)sta->drv_priv)->sta_id;
}
-/**
- * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
- * @priv: iwl priv
- * @context: the current context
- * @sta: mac80211 station
- *
- * In certain circumstances mac80211 passes a station pointer
- * that may be %NULL, for example during TX or key setup. In
- * that case, we need to use the broadcast station, so this
- * inline wraps that pattern.
- */
-static inline int iwl_sta_id_or_broadcast(struct iwl_priv *priv,
- struct iwl_rxon_context *context,
- struct ieee80211_sta *sta)
-{
- int sta_id;
-
- if (!sta)
- return context->bcast_sta_id;
-
- sta_id = iwl_sta_id(sta);
-
- /*
- * mac80211 should not be passing a partially
- * initialised station!
- */
- WARN_ON(sta_id == IWL_INVALID_STATION);
-
- return sta_id;
-}
-
int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
@@ -351,7 +306,6 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
}
/* eeprom */
-void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv);
void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac);
extern int iwl_alive_start(struct iwl_priv *priv);
@@ -388,4 +342,68 @@ void iwl_testmode_cleanup(struct iwl_priv *priv)
}
#endif
+#ifdef CONFIG_IWLWIFI_DEBUG
+void iwl_print_rx_config_cmd(struct iwl_priv *priv,
+ enum iwl_rxon_context_id ctxid);
+#else
+static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
+ enum iwl_rxon_context_id ctxid)
+{
+}
+#endif
+
+/* status checks */
+
+static inline int iwl_is_ready(struct iwl_priv *priv)
+{
+ /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
+ * set but EXIT_PENDING is not */
+ return test_bit(STATUS_READY, &priv->status) &&
+ test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
+ !test_bit(STATUS_EXIT_PENDING, &priv->status);
+}
+
+static inline int iwl_is_alive(struct iwl_priv *priv)
+{
+ return test_bit(STATUS_ALIVE, &priv->status);
+}
+
+static inline int iwl_is_rfkill(struct iwl_priv *priv)
+{
+ return test_bit(STATUS_RF_KILL_HW, &priv->status);
+}
+
+static inline int iwl_is_ctkill(struct iwl_priv *priv)
+{
+ return test_bit(STATUS_CT_KILL, &priv->status);
+}
+
+static inline int iwl_is_ready_rf(struct iwl_priv *priv)
+{
+ if (iwl_is_rfkill(priv))
+ return 0;
+
+ return iwl_is_ready(priv);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \
+do { \
+ if (!iwl_is_rfkill((m))) \
+ IWL_ERR(m, fmt, ##args); \
+ else \
+ __iwl_err(trans(m)->dev, true, \
+ !iwl_have_debug_level(IWL_DL_RADIO), \
+ fmt, ##args); \
+} while (0)
+#else
+#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \
+do { \
+ if (!iwl_is_rfkill((m))) \
+ IWL_ERR(m, fmt, ##args); \
+ else \
+ __iwl_err(trans(m)->dev, true, true, fmt, ##args); \
+} while (0)
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
#endif /* __iwl_agn_h__ */
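
The status helpers and the IWL_DEBUG_QUIET_RFKILL macro added to iwl-agn.h above replace the old priv->shrd->status bookkeeping with bits kept directly on priv->status. A minimal, purely illustrative caller (the function below is not part of the patch; only iwl_is_ready_rf(), IWL_DEBUG_QUIET_RFKILL() and iwl_dvm_send_cmd() come from the header) might combine them like this:

	/* Illustrative only -- not part of the patch. */
	static int example_send_if_ready(struct iwl_priv *priv,
					 struct iwl_host_cmd *cmd)
	{
		if (!iwl_is_ready_rf(priv)) {
			/* stays quiet while the HW RF-kill switch is asserted */
			IWL_DEBUG_QUIET_RFKILL(priv,
					       "device not ready, dropping command\n");
			return -EIO;
		}

		return iwl_dvm_send_cmd(priv, cmd);
	}
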
diff --git a/drivers/net/wireless/iwlwifi/iwl-bus.h b/drivers/net/wireless/iwlwifi/iwl-bus.h
deleted file mode 100644
index 940d5038b39c..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-bus.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_bus_h__
-#define __iwl_bus_h__
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-
-/**
- * DOC: Bus layer - role and goal
- *
- * iwl-bus.h defines the API to the bus layer of the iwlwifi driver.
- * The bus layer is responsible for doing very basic bus operations that are
- * listed in the iwl_bus_ops structure.
- * The bus layer registers to the bus driver, advertises the supported HW and
- * gets notifications about enumeration, suspend, resume.
- * For the moment, the bus layer is not a Linux kernel module in its own right, and
- * the module_init function of the driver must call the bus specific
- * registration functions. These functions are listed at the end of this file.
- * For the moment, there is only one implementation of this interface: PCI-e.
- * This implementation is iwl-pci.c
- */
-
-/**
- * DOC: encapsulation and type safety
- *
- * The iwl_bus describes the data that is shared amongst all the bus layer
- * implementations. This data is visible to other layers. Data in the bus
- * specific area is not visible outside the bus specific implementation.
- * iwl_bus holds a pointer to iwl_shared which holds pointer to all the other
- * layers of the driver (iwl_priv, iwl_trans). In fact, this is the way to go
- * when the transport layer needs to call a function of another layer.
- *
- * In order to achieve encapsulation, iwl_priv cannot be dereferenced from the
- * bus layer. Type safety is still kept since functions that get iwl_priv get
- * a typed pointer (as opposed to void *).
- */
-
-/**
- * DOC: probe flow
- *
- * The module_init calls the bus specific registration function. The
- * registration to the bus layer will trigger an enumeration of the bus which
- * will call the bus specific probe function.
- * The first thing this function must do is to allocate the memory needed by
- * iwl_bus + the bus_specific data.
- * Once the bus specific probe function has configured the hardware, it
- * chooses the appropriate transport layer and calls iwl_probe that will run
- * the bus independent probe flow.
- *
- * Note: The bus specific code must set the following data in iwl_bus before it
- * calls iwl_probe:
- * * bus->dev
- * * bus->irq
- * * bus->ops
- */
-
-struct iwl_shared;
-struct iwl_bus;
-
-/**
- * struct iwl_bus_ops - bus specific operations
- * @get_pm_support: must return true if the bus can go to sleep
- * @apm_config: will be called during the config of the APM
- * @get_hw_id_string: prints the hw_id in the provided buffer
- * @get_hw_id: get hw_id in u32
- * @write8: write a byte to register at offset ofs
- * @write32: write a dword to register at offset ofs
- * @read32: read a dword from register at offset ofs
- */
-struct iwl_bus_ops {
- bool (*get_pm_support)(struct iwl_bus *bus);
- void (*apm_config)(struct iwl_bus *bus);
- void (*get_hw_id_string)(struct iwl_bus *bus, char buf[], int buf_len);
- u32 (*get_hw_id)(struct iwl_bus *bus);
- void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val);
- void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val);
- u32 (*read32)(struct iwl_bus *bus, u32 ofs);
-};
-
-/**
- * struct iwl_bus - bus common data
- *
- * This data is common to all bus layer implementations.
- *
- * @dev - pointer to struct device * that represents the device
- * @ops - pointer to iwl_bus_ops
- * @shrd - pointer to iwl_shared which holds shared data from the upper layer
- * NB: for the time being this needs to be set by the upper layer since
- * it allocates the shared data
- * @irq - the irq number for the device
- * @reg_lock - protect hw register access
- */
-struct iwl_bus {
- struct device *dev;
- const struct iwl_bus_ops *ops;
- struct iwl_shared *shrd;
-
- unsigned int irq;
- spinlock_t reg_lock;
-
- /* pointer to bus specific struct */
- /*Ensure that this pointer will always be aligned to sizeof pointer */
- char bus_specific[0] __attribute__((__aligned__(sizeof(void *))));
-};
-
-static inline bool bus_get_pm_support(struct iwl_bus *bus)
-{
- return bus->ops->get_pm_support(bus);
-}
-
-static inline void bus_apm_config(struct iwl_bus *bus)
-{
- bus->ops->apm_config(bus);
-}
-
-static inline void bus_get_hw_id_string(struct iwl_bus *bus, char buf[],
- int buf_len)
-{
- bus->ops->get_hw_id_string(bus, buf, buf_len);
-}
-
-static inline u32 bus_get_hw_id(struct iwl_bus *bus)
-{
- return bus->ops->get_hw_id(bus);
-}
-
-static inline void bus_write8(struct iwl_bus *bus, u32 ofs, u8 val)
-{
- bus->ops->write8(bus, ofs, val);
-}
-
-static inline void bus_write32(struct iwl_bus *bus, u32 ofs, u32 val)
-{
- bus->ops->write32(bus, ofs, val);
-}
-
-static inline u32 bus_read32(struct iwl_bus *bus, u32 ofs)
-{
- return bus->ops->read32(bus, ofs);
-}
-
-/*****************************************************
-* Bus layer registration functions
-******************************************************/
-int __must_check iwl_pci_register_driver(void);
-void iwl_pci_unregister_driver(void);
-
-#endif /* __iwl_bus_h__ */
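
For context, the header deleted above implemented the bus abstraction as a small vtable plus thin inline wrappers: a backend such as iwl-pci.c filled in struct iwl_bus_ops, and upper layers only ever called bus_write32()/bus_read32(). A stripped-down, hypothetical sketch of that (now removed) pattern — the example_* names are stand-ins, not the real iwl-pci.c code:

	/* Hypothetical backend for the removed iwl_bus_ops vtable (sketch only). */
	static void example_write32(struct iwl_bus *bus, u32 ofs, u32 val)
	{
		/* a real backend would write val to its mapped register at ofs */
	}

	static u32 example_read32(struct iwl_bus *bus, u32 ofs)
	{
		return 0;	/* a real backend would read the mapped register */
	}

	static const struct iwl_bus_ops example_bus_ops = {
		.write32 = example_write32,
		.read32	 = example_read32,
		/* remaining callbacks omitted */
	};
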
diff --git a/drivers/net/wireless/iwlwifi/iwl-cfg.h b/drivers/net/wireless/iwlwifi/iwl-cfg.h
index e1d78257e4a9..82152311d73b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-cfg.h
+++ b/drivers/net/wireless/iwlwifi/iwl-cfg.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -68,44 +68,46 @@
* This file declares the config structures for all devices.
*/
-extern struct iwl_cfg iwl5300_agn_cfg;
-extern struct iwl_cfg iwl5100_agn_cfg;
-extern struct iwl_cfg iwl5350_agn_cfg;
-extern struct iwl_cfg iwl5100_bgn_cfg;
-extern struct iwl_cfg iwl5100_abg_cfg;
-extern struct iwl_cfg iwl5150_agn_cfg;
-extern struct iwl_cfg iwl5150_abg_cfg;
-extern struct iwl_cfg iwl6005_2agn_cfg;
-extern struct iwl_cfg iwl6005_2abg_cfg;
-extern struct iwl_cfg iwl6005_2bg_cfg;
-extern struct iwl_cfg iwl6005_2agn_sff_cfg;
-extern struct iwl_cfg iwl6005_2agn_d_cfg;
-extern struct iwl_cfg iwl1030_bgn_cfg;
-extern struct iwl_cfg iwl1030_bg_cfg;
-extern struct iwl_cfg iwl6030_2agn_cfg;
-extern struct iwl_cfg iwl6030_2abg_cfg;
-extern struct iwl_cfg iwl6030_2bgn_cfg;
-extern struct iwl_cfg iwl6030_2bg_cfg;
-extern struct iwl_cfg iwl6000i_2agn_cfg;
-extern struct iwl_cfg iwl6000i_2abg_cfg;
-extern struct iwl_cfg iwl6000i_2bg_cfg;
-extern struct iwl_cfg iwl6000_3agn_cfg;
-extern struct iwl_cfg iwl6050_2agn_cfg;
-extern struct iwl_cfg iwl6050_2abg_cfg;
-extern struct iwl_cfg iwl6150_bgn_cfg;
-extern struct iwl_cfg iwl6150_bg_cfg;
-extern struct iwl_cfg iwl1000_bgn_cfg;
-extern struct iwl_cfg iwl1000_bg_cfg;
-extern struct iwl_cfg iwl100_bgn_cfg;
-extern struct iwl_cfg iwl100_bg_cfg;
-extern struct iwl_cfg iwl130_bgn_cfg;
-extern struct iwl_cfg iwl130_bg_cfg;
-extern struct iwl_cfg iwl2000_2bgn_cfg;
-extern struct iwl_cfg iwl2000_2bgn_d_cfg;
-extern struct iwl_cfg iwl2030_2bgn_cfg;
-extern struct iwl_cfg iwl6035_2agn_cfg;
-extern struct iwl_cfg iwl105_bgn_cfg;
-extern struct iwl_cfg iwl105_bgn_d_cfg;
-extern struct iwl_cfg iwl135_bgn_cfg;
+extern const struct iwl_cfg iwl5300_agn_cfg;
+extern const struct iwl_cfg iwl5100_agn_cfg;
+extern const struct iwl_cfg iwl5350_agn_cfg;
+extern const struct iwl_cfg iwl5100_bgn_cfg;
+extern const struct iwl_cfg iwl5100_abg_cfg;
+extern const struct iwl_cfg iwl5150_agn_cfg;
+extern const struct iwl_cfg iwl5150_abg_cfg;
+extern const struct iwl_cfg iwl6005_2agn_cfg;
+extern const struct iwl_cfg iwl6005_2abg_cfg;
+extern const struct iwl_cfg iwl6005_2bg_cfg;
+extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
+extern const struct iwl_cfg iwl6005_2agn_d_cfg;
+extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
+extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
+extern const struct iwl_cfg iwl1030_bgn_cfg;
+extern const struct iwl_cfg iwl1030_bg_cfg;
+extern const struct iwl_cfg iwl6030_2agn_cfg;
+extern const struct iwl_cfg iwl6030_2abg_cfg;
+extern const struct iwl_cfg iwl6030_2bgn_cfg;
+extern const struct iwl_cfg iwl6030_2bg_cfg;
+extern const struct iwl_cfg iwl6000i_2agn_cfg;
+extern const struct iwl_cfg iwl6000i_2abg_cfg;
+extern const struct iwl_cfg iwl6000i_2bg_cfg;
+extern const struct iwl_cfg iwl6000_3agn_cfg;
+extern const struct iwl_cfg iwl6050_2agn_cfg;
+extern const struct iwl_cfg iwl6050_2abg_cfg;
+extern const struct iwl_cfg iwl6150_bgn_cfg;
+extern const struct iwl_cfg iwl6150_bg_cfg;
+extern const struct iwl_cfg iwl1000_bgn_cfg;
+extern const struct iwl_cfg iwl1000_bg_cfg;
+extern const struct iwl_cfg iwl100_bgn_cfg;
+extern const struct iwl_cfg iwl100_bg_cfg;
+extern const struct iwl_cfg iwl130_bgn_cfg;
+extern const struct iwl_cfg iwl130_bg_cfg;
+extern const struct iwl_cfg iwl2000_2bgn_cfg;
+extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
+extern const struct iwl_cfg iwl2030_2bgn_cfg;
+extern const struct iwl_cfg iwl6035_2agn_cfg;
+extern const struct iwl_cfg iwl105_bgn_cfg;
+extern const struct iwl_cfg iwl105_bgn_d_cfg;
+extern const struct iwl_cfg iwl135_bgn_cfg;
#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index f822ac447c3b..9ed73e5154be 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -69,22 +69,9 @@
#ifndef __iwl_commands_h__
#define __iwl_commands_h__
-#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
+#include <linux/types.h>
-struct iwl_priv;
-
-/* uCode version contains 4 values: Major/Minor/API/Serial */
-#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
-#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
-#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
-#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
-
-
-/* Tx rates */
-#define IWL_CCK_RATES 4
-#define IWL_OFDM_RATES 8
-#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES)
enum {
REPLY_ALIVE = 0x1,
@@ -213,48 +200,6 @@ enum {
/* iwl_cmd_header flags value */
#define IWL_CMD_FAILED_MSK 0x40
-#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
-#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
-#define SEQ_TO_INDEX(s) ((s) & 0xff)
-#define INDEX_TO_SEQ(i) ((i) & 0xff)
-#define SEQ_RX_FRAME cpu_to_le16(0x8000)
-
-/**
- * struct iwl_cmd_header
- *
- * This header format appears in the beginning of each command sent from the
- * driver, and each response/notification received from uCode.
- */
-struct iwl_cmd_header {
- u8 cmd; /* Command ID: REPLY_RXON, etc. */
- u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
- /*
- * The driver sets up the sequence number to values of its choosing.
- * uCode does not use this value, but passes it back to the driver
- * when sending the response to each driver-originated command, so
- * the driver can match the response to the command. Since the values
- * don't get used by uCode, the driver may set up an arbitrary format.
- *
- * There is one exception: uCode sets bit 15 when it originates
- * the response/notification, i.e. when the response/notification
- * is not a direct response to a command sent by the driver. For
- * example, uCode issues REPLY_RX when it sends a received frame
- * to the driver; it is not a direct response to any driver command.
- *
- * The Linux driver uses the following format:
- *
- * 0:7 tfd index - position within TX queue
- * 8:12 TX queue id
- * 13:14 reserved
- * 15 unsolicited RX or uCode-originated notification
- */
- __le16 sequence;
-
- /* command or response/notification data follows immediately */
- u8 data[0];
-} __packed;
-
-
/**
* iwlagn rate_n_flags bit fields
*
@@ -3151,8 +3096,6 @@ struct iwl_enhance_sensitivity_cmd {
*/
/* Phy calibration command for series */
-/* The default calibrate table size if not specified by firmware */
-#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
enum {
IWL_PHY_CALIBRATE_DC_CMD = 8,
IWL_PHY_CALIBRATE_LO_CMD = 9,
@@ -3161,11 +3104,8 @@ enum {
IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16,
IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17,
IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD = 18,
- IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
};
-#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
-
/* This enum defines the bitmap of various calibrations to enable in both
* init ucode and runtime ucode through CALIBRATION_CFG_CMD.
*/
@@ -3905,50 +3845,6 @@ struct iwlagn_wowlan_kek_kck_material_cmd {
__le64 replay_ctr;
} __packed;
-/******************************************************************************
- * (13)
- * Union of all expected notifications/responses:
- *
- *****************************************************************************/
-#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
-
-struct iwl_rx_packet {
- /*
- * The first 4 bytes of the RX frame header contain both the RX frame
- * size and some flags.
- * Bit fields:
- * 31: flag flush RB request
- * 30: flag ignore TC (terminal counter) request
- * 29: flag fast IRQ request
- * 28-14: Reserved
- * 13-00: RX frame size
- */
- __le32 len_n_flags;
- struct iwl_cmd_header hdr;
- union {
- struct iwl_alive_resp alive_frame;
- struct iwl_spectrum_notification spectrum_notif;
- struct iwl_csa_notification csa_notif;
- struct iwl_error_resp err_resp;
- struct iwl_card_state_notif card_state_notif;
- struct iwl_add_sta_resp add_sta;
- struct iwl_rem_sta_resp rem_sta;
- struct iwl_sleep_notification sleep_notif;
- struct iwl_spectrum_resp spectrum;
- struct iwl_notif_statistics stats;
- struct iwl_bt_notif_statistics stats_bt;
- struct iwl_compressed_ba_resp compressed_ba;
- struct iwl_missed_beacon_notif missed_beacon;
- struct iwl_coex_medium_notification coex_medium_notif;
- struct iwl_coex_event_resp coex_event;
- struct iwl_bt_coex_profile_notif bt_coex_profile_notif;
- __le32 status;
- u8 raw[0];
- } u;
-} __packed;
-
-int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
-
/*
* REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
*/
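
The helpers removed above (SEQ_TO_QUEUE and friends) packed a TX queue id into bits 8-12 and a TFD index into bits 0-7 of the 16-bit iwl_cmd_header sequence field, with bit 15 reserved for uCode-originated notifications. A worked example with the removed macros, for reference only:

	/* Worked example of the removed sequence packing (illustrative only). */
	u16 seq = QUEUE_TO_SEQ(4) | INDEX_TO_SEQ(37);	/* 0x0425: queue 4, TFD slot 37 */

	/* SEQ_TO_QUEUE(seq) == 4 and SEQ_TO_INDEX(seq) == 37 */
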
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 7bcfa781e0b9..46490d3b95b9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -38,7 +38,6 @@
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-power.h"
-#include "iwl-agn.h"
#include "iwl-shared.h"
#include "iwl-agn.h"
#include "iwl-trans.h"
@@ -114,7 +113,7 @@ int iwl_init_geos(struct iwl_priv *priv)
if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
- set_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
+ set_bit(STATUS_GEO_CONFIGURED, &priv->status);
return 0;
}
@@ -137,7 +136,7 @@ int iwl_init_geos(struct iwl_priv *priv)
sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
- if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
iwl_init_ht_hw_capab(priv, &sband->ht_cap,
IEEE80211_BAND_5GHZ);
@@ -147,7 +146,7 @@ int iwl_init_geos(struct iwl_priv *priv)
sband->bitrates = rates;
sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
- if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
iwl_init_ht_hw_capab(priv, &sband->ht_cap,
IEEE80211_BAND_2GHZ);
@@ -202,19 +201,18 @@ int iwl_init_geos(struct iwl_priv *priv)
priv->tx_power_next = max_tx_power;
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
- cfg(priv)->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
- char buf[32];
- bus_get_hw_id_string(bus(priv), buf, sizeof(buf));
+ hw_params(priv).sku & EEPROM_SKU_CAP_BAND_52GHZ) {
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
- "Please send your %s to maintainer.\n", buf);
- cfg(priv)->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
+ "Please send your %s to maintainer.\n",
+ trans(priv)->hw_id_str);
+ hw_params(priv).sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
}
IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
priv->bands[IEEE80211_BAND_2GHZ].n_channels,
priv->bands[IEEE80211_BAND_5GHZ].n_channels);
- set_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
+ set_bit(STATUS_GEO_CONFIGURED, &priv->status);
return 0;
}
@@ -226,7 +224,7 @@ void iwl_free_geos(struct iwl_priv *priv)
{
kfree(priv->ieee_channels);
kfree(priv->ieee_rates);
- clear_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
+ clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
}
static bool iwl_is_channel_extension(struct iwl_priv *priv,
@@ -318,7 +316,7 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
conf = &priv->hw->conf;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
@@ -371,7 +369,7 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
le32_to_cpu(ctx->timing.beacon_init_val),
le16_to_cpu(ctx->timing.atim_window));
- return iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_timing_cmd,
+ return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
}
@@ -644,7 +642,7 @@ u8 iwl_get_single_channel_number(struct iwl_priv *priv,
* NOTE: Does not commit to the hardware; it sets appropriate bit fields
* in the staging RXON flag structure based on the ch->band
*/
-int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
+void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
struct iwl_rxon_context *ctx)
{
enum ieee80211_band band = ch->band;
@@ -652,7 +650,7 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
if ((le16_to_cpu(ctx->staging.channel) == channel) &&
(priv->band == band))
- return 0;
+ return;
ctx->staging.channel = cpu_to_le16(channel);
if (band == IEEE80211_BAND_5GHZ)
@@ -664,7 +662,6 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
- return 0;
}
void iwl_set_flags_for_band(struct iwl_priv *priv,
@@ -801,11 +798,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
- &priv->shrd->status))
+ if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
ieee80211_chswitch_done(ctx->vif, is_success);
}
@@ -832,24 +828,32 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
}
#endif
-void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
+static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
{
unsigned int reload_msec;
unsigned long reload_jiffies;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_have_debug_level(IWL_DL_FW_ERRORS))
+ iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
+#endif
+
+ /* uCode is no longer loaded. */
+ priv->ucode_loaded = false;
+
/* Set the FW error flag -- cleared on iwl_down */
set_bit(STATUS_FW_ERROR, &priv->shrd->status);
/* Cancel currently queued command. */
clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
- iwl_abort_notification_waits(priv->shrd);
+ iwl_abort_notification_waits(&priv->notif_wait);
/* Keep the restart process from trying to send host
* commands by clearing the ready bit */
- clear_bit(STATUS_READY, &priv->shrd->status);
+ clear_bit(STATUS_READY, &priv->status);
- wake_up(&priv->shrd->wait_command_queue);
+ wake_up(&trans(priv)->wait_command_queue);
if (!ondemand) {
/*
@@ -872,140 +876,17 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
priv->reload_count = 0;
}
- if (!test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
+ if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
if (iwlagn_mod_params.restart_fw) {
IWL_DEBUG_FW_ERRORS(priv,
"Restarting adapter due to uCode error.\n");
- queue_work(priv->shrd->workqueue, &priv->restart);
+ queue_work(priv->workqueue, &priv->restart);
} else
IWL_DEBUG_FW_ERRORS(priv,
"Detected FW error, but not restarting\n");
}
}
-static int iwl_apm_stop_master(struct iwl_priv *priv)
-{
- int ret = 0;
-
- /* stop device's busmaster DMA activity */
- iwl_set_bit(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
-
- ret = iwl_poll_bit(bus(priv), CSR_RESET,
- CSR_RESET_REG_FLAG_MASTER_DISABLED,
- CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
- if (ret)
- IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
-
- IWL_DEBUG_INFO(priv, "stop master\n");
-
- return ret;
-}
-
-void iwl_apm_stop(struct iwl_priv *priv)
-{
- IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
-
- clear_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status);
-
- /* Stop device's DMA activity */
- iwl_apm_stop_master(priv);
-
- /* Reset the entire device */
- iwl_set_bit(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
- udelay(10);
-
- /*
- * Clear "initialization complete" bit to move adapter from
- * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
- */
- iwl_clear_bit(bus(priv), CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-}
-
-
-/*
- * Start up NIC's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_apm_stop())
- * NOTE: This does not load uCode nor start the embedded processor
- */
-int iwl_apm_init(struct iwl_priv *priv)
-{
- int ret = 0;
- IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
-
- /*
- * Use "set_bit" below rather than "write", to preserve any hardware
- * bits already set by default after reset.
- */
-
- /* Disable L0S exit timer (platform NMI Work/Around) */
- iwl_set_bit(bus(priv), CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
-
- /*
- * Disable L0s without affecting L1;
- * don't wait for ICH L0s (ICH bug W/A)
- */
- iwl_set_bit(bus(priv), CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
-
- /* Set FH wait threshold to maximum (HW error during stress W/A) */
- iwl_set_bit(bus(priv), CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
-
- /*
- * Enable HAP INTA (interrupt from management bus) to
- * wake device's PCI Express link L1a -> L0s
- */
- iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
-
- bus_apm_config(bus(priv));
-
- /* Configure analog phase-lock-loop before activating to D0A */
- if (cfg(priv)->base_params->pll_cfg_val)
- iwl_set_bit(bus(priv), CSR_ANA_PLL_CFG,
- cfg(priv)->base_params->pll_cfg_val);
-
- /*
- * Set "initialization complete" bit to move adapter from
- * D0U* --> D0A* (powered-up active) state.
- */
- iwl_set_bit(bus(priv), CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
- /*
- * Wait for clock stabilization; once stabilized, access to
- * device-internal resources is supported, e.g. iwl_write_prph()
- * and accesses to uCode SRAM.
- */
- ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
- if (ret < 0) {
- IWL_DEBUG_INFO(priv, "Failed to init the card\n");
- goto out;
- }
-
- /*
- * Enable DMA clock and wait for it to stabilize.
- *
- * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
- * do not disable clocks. This preserves any hardware bits already
- * set by default in "CLK_CTRL_REG" after reset.
- */
- iwl_write_prph(bus(priv), APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(20);
-
- /* Disable L1-Active */
- iwl_set_bits_prph(bus(priv), APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
- set_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status);
-
-out:
- return ret;
-}
-
-
int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
int ret;
@@ -1013,7 +894,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
bool defer;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
if (priv->tx_power_user_lmt == tx_power && !force)
return 0;
@@ -1033,7 +914,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
return -EINVAL;
}
- if (!iwl_is_ready_rf(priv->shrd))
+ if (!iwl_is_ready_rf(priv))
return -EIO;
/* scan complete and commit_rxon use tx_power_next value,
@@ -1041,7 +922,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
priv->tx_power_next = tx_power;
/* do not set tx power when scanning or channel changing */
- defer = test_bit(STATUS_SCANNING, &priv->shrd->status) ||
+ defer = test_bit(STATUS_SCANNING, &priv->status) ||
memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
if (defer && !force) {
IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
@@ -1079,7 +960,7 @@ void iwl_send_bt_config(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "BT coex %s\n",
(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
- if (iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
+ if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
IWL_ERR(priv, "failed to send BT Coex Config\n");
}
@@ -1092,12 +973,12 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
};
if (flags & CMD_ASYNC)
- return iwl_trans_send_cmd_pdu(trans(priv), REPLY_STATISTICS_CMD,
+ return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
CMD_ASYNC,
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
else
- return iwl_trans_send_cmd_pdu(trans(priv), REPLY_STATISTICS_CMD,
+ return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
CMD_SYNC,
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
@@ -1124,7 +1005,7 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
{
u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
- if (iwl_get_debug_level(priv->shrd) & IWL_DL_TX) {
+ if (iwl_have_debug_level(IWL_DL_TX)) {
if (!priv->tx_traffic) {
priv->tx_traffic =
kzalloc(traffic_size, GFP_KERNEL);
@@ -1132,7 +1013,7 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
return -ENOMEM;
}
}
- if (iwl_get_debug_level(priv->shrd) & IWL_DL_RX) {
+ if (iwl_have_debug_level(IWL_DL_RX)) {
if (!priv->rx_traffic) {
priv->rx_traffic =
kzalloc(traffic_size, GFP_KERNEL);
@@ -1159,7 +1040,7 @@ void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
__le16 fc;
u16 len;
- if (likely(!(iwl_get_debug_level(priv->shrd) & IWL_DL_TX)))
+ if (likely(!iwl_have_debug_level(IWL_DL_TX)))
return;
if (!priv->tx_traffic)
@@ -1183,7 +1064,7 @@ void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
__le16 fc;
u16 len;
- if (likely(!(iwl_get_debug_level(priv->shrd) & IWL_DL_RX)))
+ if (likely(!iwl_have_debug_level(IWL_DL_RX)))
return;
if (!priv->rx_traffic)
@@ -1340,7 +1221,7 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
static void iwl_force_rf_reset(struct iwl_priv *priv)
{
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
if (!iwl_is_any_associated(priv)) {
@@ -1365,7 +1246,7 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
{
struct iwl_force_reset *force_reset;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return -EINVAL;
if (mode >= IWL_MAX_FORCE_RESET) {
@@ -1421,7 +1302,7 @@ int iwl_cmd_echo_test(struct iwl_priv *priv)
.flags = CMD_SYNC,
};
- ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ ret = iwl_dvm_send_cmd(priv, &cmd);
if (ret)
IWL_ERR(priv, "echo testing fail: 0X%x\n", ret);
else
@@ -1455,30 +1336,20 @@ void iwl_bg_watchdog(unsigned long data)
int cnt;
unsigned long timeout;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (iwl_is_rfkill(priv->shrd))
+ if (iwl_is_rfkill(priv))
return;
- timeout = cfg(priv)->base_params->wd_timeout;
+ timeout = hw_params(priv).wd_timeout;
if (timeout == 0)
return;
- /* monitor and check for stuck cmd queue */
- if (iwl_check_stuck_queue(priv, priv->shrd->cmd_queue))
- return;
-
- /* monitor and check for other stuck queues */
- if (iwl_is_any_associated(priv)) {
- for (cnt = 0; cnt < hw_params(priv).max_txq_num; cnt++) {
- /* skip as we already checked the command queue */
- if (cnt == priv->shrd->cmd_queue)
- continue;
- if (iwl_check_stuck_queue(priv, cnt))
- return;
- }
- }
+ /* monitor and check for stuck queues */
+ for (cnt = 0; cnt < cfg(priv)->base_params->num_of_queues; cnt++)
+ if (iwl_check_stuck_queue(priv, cnt))
+ return;
mod_timer(&priv->watchdog, jiffies +
msecs_to_jiffies(IWL_WD_TICK(timeout)));
@@ -1486,7 +1357,7 @@ void iwl_bg_watchdog(unsigned long data)
void iwl_setup_watchdog(struct iwl_priv *priv)
{
- unsigned int timeout = cfg(priv)->base_params->wd_timeout;
+ unsigned int timeout = hw_params(priv).wd_timeout;
if (!iwlagn_mod_params.wd_disable) {
/* use system default */
@@ -1580,31 +1451,30 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
return cpu_to_le32(res);
}
-void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state)
+void iwl_nic_error(struct iwl_op_mode *op_mode)
{
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+ iwlagn_fw_error(priv, false);
}
-void iwl_nic_config(struct iwl_priv *priv)
+void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
- cfg(priv)->lib->nic_config(priv);
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+ if (state)
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
}
-void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb)
+void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
struct ieee80211_tx_info *info;
info = IEEE80211_SKB_CB(skb);
- kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1]));
+ kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));
dev_kfree_skb_any(skb);
}
-
-void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac)
-{
- ieee80211_stop_queue(priv->hw, ac);
-}
-
-void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac)
-{
- ieee80211_wake_queue(priv->hw, ac);
-}
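
A recurring change in iwl-core.c above is the switch from calling the transport directly to going through the DVM wrappers declared in iwl-agn.h. Shown side by side using the BT-config command from the hunk above (error handling elided; both fragments mirror lines already in the diff):

	/* Before: the caller reached into the transport layer itself. */
	ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG, CMD_SYNC,
				     sizeof(struct iwl_bt_cmd), &bt_cmd);

	/* After: the op-mode wrapper takes the priv and owns the bookkeeping. */
	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, CMD_SYNC,
				   sizeof(struct iwl_bt_cmd), &bt_cmd);
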
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 7bf76ab94dd2..635eb685edeb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -76,13 +76,7 @@ struct iwl_cmd;
struct iwl_lib_ops {
/* set hw dependent parameters */
- int (*set_hw_params)(struct iwl_priv *priv);
- /* setup BT Rx handler */
- void (*bt_rx_handler_setup)(struct iwl_priv *priv);
- /* setup BT related deferred work */
- void (*bt_setup_deferred_work)(struct iwl_priv *priv);
- /* cancel deferred work */
- void (*cancel_deferred_work)(struct iwl_priv *priv);
+ void (*set_hw_params)(struct iwl_priv *priv);
int (*set_channel_switch)(struct iwl_priv *priv,
struct ieee80211_channel_switch *ch_switch);
/* device specific configuration */
@@ -95,72 +89,6 @@ struct iwl_lib_ops {
void (*temperature)(struct iwl_priv *priv);
};
-/*
- * @max_ll_items: max number of OTP blocks
- * @shadow_ram_support: shadow support for OTP memory
- * @led_compensation: compensate on the led on/off time per HW according
- * to the deviation to achieve the desired led frequency.
- * The detail algorithm is described in iwl-led.c
- * @chain_noise_num_beacons: number of beacons used to compute chain noise
- * @adv_thermal_throttle: support advance thermal throttle
- * @support_ct_kill_exit: support ct kill exit condition
- * @support_wimax_coexist: support wimax/wifi co-exist
- * @plcp_delta_threshold: plcp error rate threshold used to trigger
- * radio tuning when there is a high receiving plcp error rate
- * @chain_noise_scale: default chain noise scale used for gain computation
- * @wd_timeout: TX queues watchdog timeout
- * @max_event_log_size: size of event log buffer size for ucode event logging
- * @shadow_reg_enable: HW shadow register bit
- * @no_idle_support: do not support idle mode
- * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
- * @wd_disable: disable the watchdog timer
- */
-struct iwl_base_params {
- int eeprom_size;
- int num_of_queues; /* def: HW dependent */
- int num_of_ampdu_queues;/* def: HW dependent */
- /* for iwl_apm_init() */
- u32 pll_cfg_val;
-
- const u16 max_ll_items;
- const bool shadow_ram_support;
- u16 led_compensation;
- bool adv_thermal_throttle;
- bool support_ct_kill_exit;
- const bool support_wimax_coexist;
- u8 plcp_delta_threshold;
- s32 chain_noise_scale;
- unsigned int wd_timeout;
- u32 max_event_log_size;
- const bool shadow_reg_enable;
- const bool no_idle_support;
- const bool hd_v2;
- const bool wd_disable;
-};
-/*
- * @advanced_bt_coexist: support advanced bt coexist
- * @bt_init_traffic_load: specify initial bt traffic load
- * @bt_prio_boost: default bt priority boost value
- * @agg_time_limit: maximum number of uSec in aggregation
- * @bt_sco_disable: uCode should not respond to BT in SCO/ESCO mode
- */
-struct iwl_bt_params {
- bool advanced_bt_coexist;
- u8 bt_init_traffic_load;
- u8 bt_prio_boost;
- u16 agg_time_limit;
- bool bt_sco_disable;
- bool bt_session_2;
-};
-/*
- * @use_rts_for_aggregation: use rts/cts protection for HT traffic
- */
-struct iwl_ht_params {
- const bool ht_greenfield_support; /* if used set to true */
- bool use_rts_for_aggregation;
- enum ieee80211_smps_mode smps_mode;
-};
-
/***************************
* L i b *
***************************/
@@ -169,7 +97,7 @@ void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
int hw_decrypt);
int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
+void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
struct iwl_rxon_context *ctx);
void iwl_set_flags_for_band(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
@@ -197,6 +125,8 @@ const char *get_ctrl_string(int cmd);
void iwl_clear_traffic_stats(struct iwl_priv *priv);
void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
u16 len);
+void iwl_reset_traffic_log(struct iwl_priv *priv);
+
#else
static inline int iwl_alloc_traffic_mem(struct iwl_priv *priv)
{
@@ -242,8 +172,6 @@ void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
void iwl_force_scan_end(struct iwl_priv *priv);
void iwl_internal_short_hw_scan(struct iwl_priv *priv);
int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
-u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
- const u8 *ta, const u8 *ie, int ie_len, int left);
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
@@ -263,6 +191,10 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
+/* traffic log definitions */
+#define IWL_TRAFFIC_ENTRIES (256)
+#define IWL_TRAFFIC_ENTRY_SIZE (64)
+
/*****************************************************
* S e n d i n g H o s t C o m m a n d s *
*****************************************************/
@@ -297,12 +229,6 @@ static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
cfg(priv)->bt_params->advanced_bt_coexist;
}
-static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
-{
- IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
- iwl_write32(bus(priv), CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
-}
-
extern bool bt_siso_mode;
#endif /* __iwl_core_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index fbc3095c7b44..5f96ce105f08 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
index 1b20c4fb791b..059efabda184 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.c
@@ -61,17 +61,67 @@
*
*****************************************************************************/
-#include "iwl-trans.h"
+#include <linux/interrupt.h>
+#include "iwl-debug.h"
-int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
- u32 flags, u16 len, const void *data)
+#define __iwl_fn(fn) \
+void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \
+{ \
+ struct va_format vaf = { \
+ .fmt = fmt, \
+ }; \
+ va_list args; \
+ \
+ va_start(args, fmt); \
+ vaf.va = &args; \
+ dev_ ##fn(dev, "%pV", &vaf); \
+ trace_iwlwifi_ ##fn(&vaf); \
+ va_end(args); \
+}
+
+__iwl_fn(warn)
+__iwl_fn(info)
+__iwl_fn(crit)
+
+void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
+ const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ if (!trace_only) {
+ if (rfkill_prefix)
+ dev_err(dev, "(RFKILL) %pV", &vaf);
+ else
+ dev_err(dev, "%pV", &vaf);
+ }
+ trace_iwlwifi_err(&vaf);
+ va_end(args);
+}
+
+#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
+void __iwl_dbg(struct device *dev,
+ u32 level, bool limit, const char *function,
+ const char *fmt, ...)
{
- struct iwl_host_cmd cmd = {
- .id = id,
- .len = { len, },
- .data = { data, },
- .flags = flags,
+ struct va_format vaf = {
+ .fmt = fmt,
};
+ va_list args;
- return iwl_trans_send_cmd(trans, &cmd);
+ va_start(args, fmt);
+ vaf.va = &args;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_have_debug_level(level) &&
+ (!limit || net_ratelimit()))
+ dev_err(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
+ function, &vaf);
+#endif
+ trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
+ va_end(args);
}
+#endif
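
The new iwl-debug.c above relies on the kernel's struct va_format / %pV mechanism: the va_list is wrapped once, and the same pre-formatted arguments can be handed both to the dev_*() printers and to the iwlwifi trace events without formatting the string twice. A condensed, stand-alone illustration of the pattern (the function name is made up):

	/* Condensed illustration of the va_format / %pV forwarding used above. */
	static void example_log(struct device *dev, const char *fmt, ...)
	{
		struct va_format vaf = { .fmt = fmt };
		va_list args;

		va_start(args, fmt);
		vaf.va = &args;
		dev_info(dev, "%pV", &vaf);	/* formatted once, here */
		/* the same &vaf could also be passed to a tracepoint */
		va_end(args);
	}
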
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index f8fc2393dd4c..a6b32a11e103 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -29,16 +29,34 @@
#ifndef __iwl_debug_h__
#define __iwl_debug_h__
-#include "iwl-bus.h"
#include "iwl-shared.h"
+#include "iwl-devtrace.h"
struct iwl_priv;
-/*No matter what is m (priv, bus, trans), this will work */
-#define IWL_ERR(m, f, a...) dev_err(bus(m)->dev, f, ## a)
-#define IWL_WARN(m, f, a...) dev_warn(bus(m)->dev, f, ## a)
-#define IWL_INFO(m, f, a...) dev_info(bus(m)->dev, f, ## a)
-#define IWL_CRIT(m, f, a...) dev_crit(bus(m)->dev, f, ## a)
+void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace,
+ const char *fmt, ...);
+void __iwl_warn(struct device *dev, const char *fmt, ...);
+void __iwl_info(struct device *dev, const char *fmt, ...);
+void __iwl_crit(struct device *dev, const char *fmt, ...);
+
+/* No matter what is m (priv, bus, trans), this will work */
+#define IWL_ERR(m, f, a...) __iwl_err(trans(m)->dev, false, false, f, ## a)
+#define IWL_WARN(m, f, a...) __iwl_warn(trans(m)->dev, f, ## a)
+#define IWL_INFO(m, f, a...) __iwl_info(trans(m)->dev, f, ## a)
+#define IWL_CRIT(m, f, a...) __iwl_crit(trans(m)->dev, f, ## a)
+
+#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
+void __iwl_dbg(struct device *dev,
+ u32 level, bool limit, const char *function,
+ const char *fmt, ...);
+#else
+static inline void
+__iwl_dbg(struct device *dev,
+ u32 level, bool limit, const char *function,
+ const char *fmt, ...)
+{}
+#endif
#define iwl_print_hex_error(m, p, len) \
do { \
@@ -46,54 +64,20 @@ do { \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
-#ifdef CONFIG_IWLWIFI_DEBUG
-#define IWL_DEBUG(m, level, fmt, ...) \
-do { \
- if (iwl_get_debug_level((m)->shrd) & (level)) \
- dev_err(bus(m)->dev, "%c %s " fmt, \
- in_interrupt() ? 'I' : 'U', __func__, \
- ##__VA_ARGS__); \
-} while (0)
-
-#define IWL_DEBUG_LIMIT(m, level, fmt, ...) \
-do { \
- if (iwl_get_debug_level((m)->shrd) & (level) && \
- net_ratelimit()) \
- dev_err(bus(m)->dev, "%c %s " fmt, \
- in_interrupt() ? 'I' : 'U', __func__, \
- ##__VA_ARGS__); \
-} while (0)
+#define IWL_DEBUG(m, level, fmt, args...) \
+ __iwl_dbg(trans(m)->dev, level, false, __func__, fmt, ##args)
+#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
+ __iwl_dbg(trans(m)->dev, level, true, __func__, fmt, ##args)
+#ifdef CONFIG_IWLWIFI_DEBUG
#define iwl_print_hex_dump(m, level, p, len) \
do { \
- if (iwl_get_debug_level((m)->shrd) & level) \
+ if (iwl_have_debug_level(level)) \
print_hex_dump(KERN_DEBUG, "iwl data: ", \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
-
-#define IWL_DEBUG_QUIET_RFKILL(p, fmt, ...) \
-do { \
- if (!iwl_is_rfkill(p->shrd)) \
- dev_err(bus(p)->dev, "%s%c %s " fmt, \
- "", \
- in_interrupt() ? 'I' : 'U', __func__, \
- ##__VA_ARGS__); \
- else if (iwl_get_debug_level(p->shrd) & IWL_DL_RADIO) \
- dev_err(bus(p)->dev, "%s%c %s " fmt, \
- "(RFKILL) ", \
- in_interrupt() ? 'I' : 'U', __func__, \
- ##__VA_ARGS__); \
-} while (0)
-
#else
-#define IWL_DEBUG(m, level, fmt, args...)
-#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
#define iwl_print_hex_dump(m, level, p, len)
-#define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \
-do { \
- if (!iwl_is_rfkill(p->shrd)) \
- IWL_ERR(p, fmt, ##args); \
-} while (0)
#endif /* CONFIG_IWLWIFI_DEBUG */
#ifdef CONFIG_IWLWIFI_DEBUGFS
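
Call sites are unaffected by the macro rework above: IWL_DEBUG() and IWL_DEBUG_LIMIT() still take a debug level and a format string, but the level check and the net_ratelimit() throttling now happen inside __iwl_dbg(). Typical call sites look like the following (the messages themselves are made up for illustration):

	/* Illustrative call sites; the messages are invented. */
	IWL_DEBUG(priv, IWL_DL_RX, "rx queue %d replenished\n", q);
	IWL_DEBUG_LIMIT(priv, IWL_DL_TX, "tx queue full, dropping frame\n");
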
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 04a3343f4610..b7b1c04f2fba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -230,16 +230,18 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
int pos = 0;
int sram;
struct iwl_priv *priv = file->private_data;
+ const struct fw_img *img;
size_t bufsz;
/* default is to dump the entire data segment */
if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
- struct iwl_trans *trans = trans(priv);
priv->dbgfs_sram_offset = 0x800000;
- if (trans->shrd->ucode_type == IWL_UCODE_INIT)
- priv->dbgfs_sram_len = trans->ucode_init.data.len;
- else
- priv->dbgfs_sram_len = trans->ucode_rt.data.len;
+ if (!priv->ucode_loaded) {
+ IWL_ERR(priv, "No uCode has been loaded.\n");
+ return -EINVAL;
+ }
+ img = &priv->fw->img[priv->shrd->ucode_type];
+ priv->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
}
len = priv->dbgfs_sram_len;
@@ -263,7 +265,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
sram = priv->dbgfs_sram_offset & ~0x3;
/* read the first u32 from sram */
- val = iwl_read_targ_mem(bus(priv), sram);
+ val = iwl_read_targ_mem(trans(priv), sram);
for (; len; len--) {
/* put the address at the start of every line */
@@ -282,7 +284,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
if (++offset == 4) {
sram += 4;
offset = 0;
- val = iwl_read_targ_mem(bus(priv), sram);
+ val = iwl_read_targ_mem(trans(priv), sram);
}
/* put in extra spaces and split lines for human readability */
@@ -336,13 +338,14 @@ static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
+ const struct fw_img *img = &priv->fw->img[IWL_UCODE_WOWLAN];
if (!priv->wowlan_sram)
return -ENODATA;
return simple_read_from_buffer(user_buf, count, ppos,
priv->wowlan_sram,
- trans(priv)->ucode_wowlan.data.len);
+ img->sec[IWL_UCODE_SECTION_DATA].len);
}
static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -455,7 +458,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
char *buf;
ssize_t ret;
- if (!test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status))
+ if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -526,32 +529,26 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
test_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
- test_bit(STATUS_INT_ENABLED, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
- test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
+ test_bit(STATUS_RF_KILL_HW, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
- test_bit(STATUS_CT_KILL, &priv->shrd->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
- test_bit(STATUS_INIT, &priv->shrd->status));
+ test_bit(STATUS_CT_KILL, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
- test_bit(STATUS_ALIVE, &priv->shrd->status));
+ test_bit(STATUS_ALIVE, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
- test_bit(STATUS_READY, &priv->shrd->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
- test_bit(STATUS_TEMPERATURE, &priv->shrd->status));
+ test_bit(STATUS_READY, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
- test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status));
+ test_bit(STATUS_GEO_CONFIGURED, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
- test_bit(STATUS_EXIT_PENDING, &priv->shrd->status));
+ test_bit(STATUS_EXIT_PENDING, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
- test_bit(STATUS_STATISTICS, &priv->shrd->status));
+ test_bit(STATUS_STATISTICS, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
- test_bit(STATUS_SCANNING, &priv->shrd->status));
+ test_bit(STATUS_SCANNING, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
- test_bit(STATUS_SCAN_ABORTING, &priv->shrd->status));
+ test_bit(STATUS_SCAN_ABORTING, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
- test_bit(STATUS_SCAN_HW, &priv->shrd->status));
+ test_bit(STATUS_SCAN_HW, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
test_bit(STATUS_POWER_PMI, &priv->shrd->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
@@ -757,14 +754,14 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
if (value != -1 && (value < 0 || value >= IWL_POWER_NUM))
return -EINVAL;
- if (!iwl_is_ready_rf(priv->shrd))
+ if (!iwl_is_ready_rf(priv))
return -EAGAIN;
priv->power_data.debug_sleep_level_override = value;
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_power_update_mode(priv, true);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
return count;
}
@@ -835,7 +832,7 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
char *buf;
int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
- (hw_params(priv).max_txq_num * 32 * 8) + 400;
+ (cfg(priv)->base_params->num_of_queues * 32 * 8) + 400;
const u8 *ptr;
ssize_t ret;
@@ -844,8 +841,7 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
IWL_ERR(priv, "Can not allocate buffer\n");
return -ENOMEM;
}
- if (priv->tx_traffic &&
- (iwl_get_debug_level(priv->shrd) & IWL_DL_TX)) {
+ if (priv->tx_traffic && iwl_have_debug_level(IWL_DL_TX)) {
ptr = priv->tx_traffic;
pos += scnprintf(buf + pos, bufsz - pos,
"Tx Traffic idx: %u\n", priv->tx_traffic_idx);
@@ -863,8 +859,7 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
}
}
- if (priv->rx_traffic &&
- (iwl_get_debug_level(priv->shrd) & IWL_DL_RX)) {
+ if (priv->rx_traffic && iwl_have_debug_level(IWL_DL_RX)) {
ptr = priv->rx_traffic;
pos += scnprintf(buf + pos, bufsz - pos,
"Rx Traffic idx: %u\n", priv->rx_traffic_idx);
@@ -919,6 +914,8 @@ static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
int p = 0;
u32 flag;
+ lockdep_assert_held(&priv->statistics.lock);
+
flag = le32_to_cpu(priv->statistics.flag);
p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
@@ -952,7 +949,7 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
struct statistics_rx_non_phy *delta_general, *max_general;
struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
- if (!iwl_is_alive(priv->shrd))
+ if (!iwl_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -966,6 +963,7 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
+ spin_lock_bh(&priv->statistics.lock);
ofdm = &priv->statistics.rx_ofdm;
cck = &priv->statistics.rx_cck;
general = &priv->statistics.rx_non_phy;
@@ -1362,6 +1360,8 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
accum_ht->unsupport_mcs,
delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+ spin_unlock_bh(&priv->statistics.lock);
+
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
@@ -1378,7 +1378,7 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
ssize_t ret;
struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
- if (!iwl_is_alive(priv->shrd))
+ if (!iwl_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -1391,6 +1391,8 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
+ spin_lock_bh(&priv->statistics.lock);
+
tx = &priv->statistics.tx;
accum_tx = &priv->accum_stats.tx;
delta_tx = &priv->delta_stats.tx;
@@ -1540,19 +1542,25 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
pos += scnprintf(buf + pos, bufsz - pos,
"tx power: (1/2 dB step)\n");
- if ((cfg(priv)->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
+ if ((hw_params(priv).valid_tx_ant & ANT_A) &&
+ tx->tx_power.ant_a)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna A:",
tx->tx_power.ant_a);
- if ((cfg(priv)->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
+ if ((hw_params(priv).valid_tx_ant & ANT_B) &&
+ tx->tx_power.ant_b)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna B:",
tx->tx_power.ant_b);
- if ((cfg(priv)->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
+ if ((hw_params(priv).valid_tx_ant & ANT_C) &&
+ tx->tx_power.ant_c)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna C:",
tx->tx_power.ant_c);
}
+
+ spin_unlock_bh(&priv->statistics.lock);
+
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
@@ -1572,7 +1580,7 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
struct statistics_div *div, *accum_div, *delta_div, *max_div;
- if (!iwl_is_alive(priv->shrd))
+ if (!iwl_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -1585,6 +1593,9 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
+
+ spin_lock_bh(&priv->statistics.lock);
+
general = &priv->statistics.common;
dbg = &priv->statistics.common.dbg;
div = &priv->statistics.common.div;
@@ -1669,6 +1680,9 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
accum_general->num_of_sos_states,
delta_general->num_of_sos_states,
max_general->num_of_sos_states);
+
+ spin_unlock_bh(&priv->statistics.lock);
+
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
@@ -1685,16 +1699,16 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
ssize_t ret;
struct statistics_bt_activity *bt, *accum_bt;
- if (!iwl_is_alive(priv->shrd))
+ if (!iwl_is_alive(priv))
return -EAGAIN;
if (!priv->bt_enable_flag)
return -EINVAL;
/* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
if (ret) {
IWL_ERR(priv,
@@ -1712,6 +1726,9 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
+
+ spin_lock_bh(&priv->statistics.lock);
+
bt = &priv->statistics.bt_activity;
accum_bt = &priv->accum_stats.bt_activity;
@@ -1757,6 +1774,8 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
le32_to_cpu(priv->statistics.num_bt_kills),
priv->statistics.accum_num_bt_kills);
+ spin_unlock_bh(&priv->statistics.lock);
+
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
@@ -1773,7 +1792,7 @@ static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
(sizeof(struct reply_agg_tx_error_statistics) * 24) + 200;
ssize_t ret;
- if (!iwl_is_alive(priv->shrd))
+ if (!iwl_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -2055,7 +2074,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
const size_t bufsz = sizeof(buf);
u32 pwrsave_status;
- pwrsave_status = iwl_read32(bus(priv), CSR_GP_CNTRL) &
+ pwrsave_status = iwl_read32(trans(priv), CSR_GP_CNTRL) &
CSR_GP_REG_POWER_SAVE_STATUS_MSK;
pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
@@ -2085,9 +2104,9 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
return -EFAULT;
/* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_send_statistics_request(priv, CMD_SYNC, true);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
return count;
}
@@ -2131,9 +2150,10 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
if (trace) {
priv->event_log.ucode_trace = true;
- /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
- mod_timer(&priv->ucode_trace,
- jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+ if (iwl_is_alive(priv)) {
+ /* start collecting data now */
+ mod_timer(&priv->ucode_trace, jiffies);
+ }
} else {
priv->event_log.ucode_trace = false;
del_timer_sync(&priv->ucode_trace);
@@ -2219,7 +2239,7 @@ static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
const size_t bufsz = sizeof(buf);
pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
- cfg(priv)->base_params->plcp_delta_threshold);
+ priv->plcp_delta_threshold);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -2241,10 +2261,10 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
return -EINVAL;
if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
(plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
- cfg(priv)->base_params->plcp_delta_threshold =
+ priv->plcp_delta_threshold =
IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
else
- cfg(priv)->base_params->plcp_delta_threshold = plcp;
+ priv->plcp_delta_threshold = plcp;
return count;
}
@@ -2320,7 +2340,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
if (sscanf(buf, "%d", &flush) != 1)
return -EINVAL;
- if (iwl_is_rfkill(priv->shrd))
+ if (iwl_is_rfkill(priv))
return -EFAULT;
iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
@@ -2346,7 +2366,7 @@ static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file,
if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
timeout = IWL_DEF_WD_TIMEOUT;
- cfg(priv)->base_params->wd_timeout = timeout;
+ hw_params(priv).wd_timeout = timeout;
iwl_setup_watchdog(priv);
return count;
}
@@ -2409,7 +2429,7 @@ static ssize_t iwl_dbgfs_protection_mode_read(struct file *file,
if (cfg(priv)->ht_params)
pos += scnprintf(buf + pos, bufsz - pos,
"use %s for aggregation\n",
- (cfg(priv)->ht_params->use_rts_for_aggregation) ?
+ (hw_params(priv).use_rts_for_aggregation) ?
"rts/cts" : "cts-to-self");
else
pos += scnprintf(buf + pos, bufsz - pos, "N/A");
@@ -2436,9 +2456,9 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
if (sscanf(buf, "%d", &rts) != 1)
return -EINVAL;
if (rts)
- cfg(priv)->ht_params->use_rts_for_aggregation = true;
+ hw_params(priv).use_rts_for_aggregation = true;
else
- cfg(priv)->ht_params->use_rts_for_aggregation = false;
+ hw_params(priv).use_rts_for_aggregation = false;
return count;
}
@@ -2484,52 +2504,6 @@ DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
DEBUGFS_READ_FILE_OPS(reply_tx_error);
DEBUGFS_WRITE_FILE_OPS(echo_test);
-#ifdef CONFIG_IWLWIFI_DEBUG
-static ssize_t iwl_dbgfs_debug_level_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- struct iwl_shared *shrd = priv->shrd;
- char buf[11];
- int len;
-
- len = scnprintf(buf, sizeof(buf), "0x%.8x",
- iwl_get_debug_level(shrd));
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t iwl_dbgfs_debug_level_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- struct iwl_shared *shrd = priv->shrd;
- char buf[11];
- unsigned long val;
- int ret;
-
- if (count > sizeof(buf))
- return -EINVAL;
-
- memset(buf, 0, sizeof(buf));
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- ret = strict_strtoul(buf, 0, &val);
- if (ret)
- return ret;
-
- shrd->dbg_level_dev = val;
- if (iwl_alloc_traffic_mem(priv))
- IWL_ERR(priv, "Not enough memory to generate traffic log\n");
-
- return count;
-}
-DEBUGFS_READ_WRITE_FILE_OPS(debug_level);
-#endif /* CONFIG_IWLWIFI_DEBUG */
-
/*
* Create the debugfs files and directories
*
@@ -2594,9 +2568,6 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
if (iwl_advanced_bt_coexist(priv))
DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
-#ifdef CONFIG_IWLWIFI_DEBUG
- DEBUGFS_ADD_FILE(debug_level, dir_debug, S_IRUSR | S_IWUSR);
-#endif
DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
&priv->disable_sens_cal);
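The debugfs statistics readers patched above now take the new priv->statistics.lock before copying counters and release it before formatting text for user space. As a quick illustration only, here is a minimal user-space C analogy of that snapshot-under-lock pattern; every name in it is hypothetical and nothing below is driver code:

/*
 * Minimal user-space analogy (hypothetical names, not driver code) of the
 * snapshot-under-lock pattern added around priv->statistics: writers and
 * readers both take the lock, readers copy a consistent snapshot and only
 * format it after dropping the lock.
 */
#include <pthread.h>
#include <stdio.h>

struct stats {
	pthread_mutex_t lock;		/* stands in for statistics.lock */
	unsigned long rx_ok;
	unsigned long rx_err;
};

static struct stats st = { .lock = PTHREAD_MUTEX_INITIALIZER };

static void stats_update(unsigned long ok, unsigned long err)
{
	pthread_mutex_lock(&st.lock);	/* writer side (notification handler) */
	st.rx_ok += ok;
	st.rx_err += err;
	pthread_mutex_unlock(&st.lock);
}

static int stats_format(char *buf, size_t bufsz)
{
	unsigned long ok, err;

	pthread_mutex_lock(&st.lock);	/* reader side (debugfs handler) */
	ok = st.rx_ok;			/* take a consistent snapshot ... */
	err = st.rx_err;
	pthread_mutex_unlock(&st.lock);	/* ... and format outside the lock */

	return snprintf(buf, bufsz, "rx ok %lu, rx err %lu\n", ok, err);
}

int main(void)
{
	char buf[64];

	stats_update(10, 1);
	stats_format(buf, sizeof(buf));
	fputs(buf, stdout);
	return 0;
}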
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index e54a4d11e584..16956b777f96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -36,20 +36,20 @@
#include <linux/wait.h>
#include <linux/leds.h>
#include <linux/slab.h>
-#include <net/ieee80211_radiotap.h>
+#include <linux/mutex.h>
#include "iwl-eeprom.h"
#include "iwl-csr.h"
-#include "iwl-prph.h"
#include "iwl-debug.h"
#include "iwl-agn-hw.h"
#include "iwl-led.h"
#include "iwl-power.h"
#include "iwl-agn-rs.h"
#include "iwl-agn-tt.h"
-#include "iwl-bus.h"
#include "iwl-trans.h"
#include "iwl-shared.h"
+#include "iwl-op-mode.h"
+#include "iwl-notif-wait.h"
struct iwl_tx_queue;
@@ -292,117 +292,8 @@ struct iwl_vif_priv {
u8 ibss_bssid_sta_id;
};
-/* v1/v2 uCode file layout */
-struct iwl_ucode_header {
- __le32 ver; /* major/minor/API/serial */
- union {
- struct {
- __le32 inst_size; /* bytes of runtime code */
- __le32 data_size; /* bytes of runtime data */
- __le32 init_size; /* bytes of init code */
- __le32 init_data_size; /* bytes of init data */
- __le32 boot_size; /* bytes of bootstrap code */
- u8 data[0]; /* in same order as sizes */
- } v1;
- struct {
- __le32 build; /* build number */
- __le32 inst_size; /* bytes of runtime code */
- __le32 data_size; /* bytes of runtime data */
- __le32 init_size; /* bytes of init code */
- __le32 init_data_size; /* bytes of init data */
- __le32 boot_size; /* bytes of bootstrap code */
- u8 data[0]; /* in same order as sizes */
- } v2;
- } u;
-};
-
-/*
- * new TLV uCode file layout
- *
- * The new TLV file format contains TLVs, that each specify
- * some piece of data. To facilitate "groups", for example
- * different instruction image with different capabilities,
- * bundled with the same init image, an alternative mechanism
- * is provided:
- * When the alternative field is 0, that means that the item
- * is always valid. When it is non-zero, then it is only
- * valid in conjunction with items of the same alternative,
- * in which case the driver (user) selects one alternative
- * to use.
- */
-
-enum iwl_ucode_tlv_type {
- IWL_UCODE_TLV_INVALID = 0, /* unused */
- IWL_UCODE_TLV_INST = 1,
- IWL_UCODE_TLV_DATA = 2,
- IWL_UCODE_TLV_INIT = 3,
- IWL_UCODE_TLV_INIT_DATA = 4,
- IWL_UCODE_TLV_BOOT = 5,
- IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
- IWL_UCODE_TLV_PAN = 7,
- IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
- IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
- IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
- IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11,
- IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12,
- IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13,
- IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14,
- IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
- IWL_UCODE_TLV_WOWLAN_INST = 16,
- IWL_UCODE_TLV_WOWLAN_DATA = 17,
- IWL_UCODE_TLV_FLAGS = 18,
-};
-
-/**
- * enum iwl_ucode_tlv_flag - ucode API flags
- * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
- * was a separate TLV but moved here to save space.
- * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
- * treats good CRC threshold as a boolean
- * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
- * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
- */
-enum iwl_ucode_tlv_flag {
- IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
- IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
- IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
- IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
-};
-
-struct iwl_ucode_tlv {
- __le16 type; /* see above */
- __le16 alternative; /* see comment */
- __le32 length; /* not including type/length fields */
- u8 data[0];
-} __packed;
-
-#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
-
-struct iwl_tlv_ucode_header {
- /*
- * The TLV style ucode header is distinguished from
- * the v1/v2 style header by first four bytes being
- * zero, as such is an invalid combination of
- * major/minor/API/serial versions.
- */
- __le32 zero;
- __le32 magic;
- u8 human_readable[64];
- __le32 ver; /* major/minor/API/serial */
- __le32 build;
- __le64 alternatives; /* bitmask of valid alternatives */
- /*
- * The data contained herein has a TLV layout,
- * see above for the TLV header and types.
- * Note that each TLV is padded to a length
- * that is a multiple of 4 for alignment.
- */
- u8 data[0];
-};
-
struct iwl_sensitivity_ranges {
u16 min_nrg_cck;
- u16 max_nrg_cck;
u16 nrg_th_cck;
u16 nrg_th_ofdm;
@@ -550,9 +441,6 @@ struct iwl_chain_noise_data {
u8 state;
};
-#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
-#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
enum {
MEASUREMENT_READY = (1 << 0),
MEASUREMENT_ACTIVE = (1 << 1),
@@ -661,7 +549,7 @@ struct traffic_stats {
* schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
* to perform continuous uCode event logging operation if enabled
*/
-#define UCODE_TRACE_PERIOD (100)
+#define UCODE_TRACE_PERIOD (10)
/*
* iwl_event_log: current uCode event log position
@@ -781,11 +669,6 @@ struct iwl_rxon_context {
bool enabled, is_40mhz;
u8 extension_chan_offset;
} ht;
-
- u8 bssid[ETH_ALEN];
- bool preauth_bssid;
-
- bool last_tx_rejected;
};
enum iwl_scan_type {
@@ -804,11 +687,11 @@ struct iwl_testmode_trace {
dma_addr_t dma_addr;
bool trace_enabled;
};
-struct iwl_testmode_sram {
+struct iwl_testmode_mem {
u32 buff_size;
u32 num_chunks;
u8 *buff_addr;
- bool sram_readed;
+ bool read_in_progress;
};
#endif
@@ -818,32 +701,55 @@ struct iwl_wipan_noa_data {
u8 data[];
};
+#define IWL_OP_MODE_GET_DVM(_iwl_op_mode) \
+ ((struct iwl_priv *) ((_iwl_op_mode)->op_mode_specific))
+
+#define IWL_MAC80211_GET_DVM(_hw) \
+ ((struct iwl_priv *) ((struct iwl_op_mode *) \
+ (_hw)->priv)->op_mode_specific)
+
struct iwl_priv {
/*data shared among all the driver's layers */
- struct iwl_shared _shrd;
struct iwl_shared *shrd;
+ const struct iwl_fw *fw;
+ unsigned long status;
+
+ spinlock_t sta_lock;
+ struct mutex mutex;
+
+ unsigned long transport_queue_stop;
+ bool passive_no_rx;
/* ieee device used by generic ieee processing code */
struct ieee80211_hw *hw;
struct ieee80211_channel *ieee_channels;
struct ieee80211_rate *ieee_rates;
- struct kmem_cache *tx_cmd_pool;
+
+ struct list_head calib_results;
+
+ struct workqueue_struct *workqueue;
enum ieee80211_band band;
void (*pre_rx_handler)(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
+ struct iwl_rx_cmd_buffer *rxb);
int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+ struct iwl_notif_wait_data notif_wait;
+
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
/* spectrum measurement report caching */
struct iwl_spectrum_notification measure_report;
u8 measurement_status;
+#define IWL_OWNERSHIP_DRIVER 0
+#define IWL_OWNERSHIP_TM 1
+ u8 ucode_owner;
+
/* ucode beacon time */
u32 ucode_beacon_time;
int missed_beacon_threshold;
@@ -863,12 +769,16 @@ struct iwl_priv {
/* firmware reload counter and timestamp */
unsigned long reload_jiffies;
int reload_count;
+ bool ucode_loaded;
+ bool init_ucode_run; /* Don't run init uCode again */
/* we allocate array of iwl_channel_info for NIC's valid channels.
* Access via channel # using indirect index array */
struct iwl_channel_info *channel_info; /* channel info array */
u8 channel_count; /* # of channels */
+ u8 plcp_delta_threshold;
+
/* thermal calibration */
s32 temperature; /* Celsius */
s32 last_temperature;
@@ -891,16 +801,11 @@ struct iwl_priv {
bool new_scan_threshold_behaviour;
+ bool wowlan;
+
/* EEPROM MAC addresses */
struct mac_address addresses[2];
- /* uCode images, save to reload in case of failure */
- int fw_index; /* firmware we're trying to load */
- u32 ucode_ver; /* version of ucode, copy of
- iwl_ucode.ver */
-
- char firmware_name[25];
-
struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
__le16 switch_channel;
@@ -910,7 +815,6 @@ struct iwl_priv {
u8 start_calib;
struct iwl_sensitivity_data sensitivity_data;
struct iwl_chain_noise_data chain_noise_data;
- bool enhance_sensitivity_table;
__le16 sensitivity_tbl[HD_TABLE_SIZE];
__le16 enhance_sensitivity_tbl[ENHANCE_HD_TABLE_ENTRIES];
@@ -956,6 +860,7 @@ struct iwl_priv {
struct statistics_bt_activity bt_activity;
__le32 num_bt_kills, accum_num_bt_kills;
#endif
+ spinlock_t lock;
} statistics;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct {
@@ -978,11 +883,6 @@ struct iwl_priv {
struct iwl_rx_phy_res last_phy_res;
bool last_phy_res_valid;
- struct completion firmware_loading_complete;
-
- u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
- u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
-
/*
* chain noise reset and gain commands are the
* two extra calibration commands follows the standard
@@ -1073,7 +973,7 @@ struct iwl_priv {
bool led_registered;
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
struct iwl_testmode_trace testmode_trace;
- struct iwl_testmode_sram testmode_sram;
+ struct iwl_testmode_mem testmode_mem;
u32 tm_fixed_rate;
#endif
@@ -1084,6 +984,7 @@ struct iwl_priv {
bool have_rekey_data;
}; /*iwl_priv */
+extern struct kmem_cache *iwl_tx_cmd_pool;
extern struct iwl_mod_params iwlagn_mod_params;
static inline struct iwl_rxon_context *
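The uCode file-layout definitions removed from iwl-dev.h above describe a stream of TLV records: a small (type, alternative, length) header followed by a payload padded to a 4-byte boundary, with records whose alternative is neither zero nor the selected one skipped. The real parser reappears in the new iwl-drv.c later in this diff; the fragment below is only a stand-alone sketch of the same walk, using hypothetical types and host-endian fields for brevity (the on-disk format is little-endian):

/*
 * Stand-alone sketch (hypothetical types) of the TLV walk described in the
 * comment removed above: each record is a (type, alternative, length)
 * header followed by `length` payload bytes, padded to a 4-byte boundary.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
	uint16_t type;
	uint16_t alternative;
	uint32_t length;		/* payload only, header excluded */
};

#define TLV_ALIGN(x) (((x) + 3u) & ~3u)

static int parse_tlvs(const uint8_t *data, size_t len)
{
	struct tlv_hdr hdr;

	while (len >= sizeof(hdr)) {
		memcpy(&hdr, data, sizeof(hdr));	/* no unaligned loads */
		data += sizeof(hdr);
		len -= sizeof(hdr);

		if (hdr.length > len || TLV_ALIGN(hdr.length) > len)
			return -1;			/* truncated file */

		printf("TLV type %u alt %u, %u payload bytes\n",
		       (unsigned)hdr.type, (unsigned)hdr.alternative,
		       (unsigned)hdr.length);

		data += TLV_ALIGN(hdr.length);		/* skip padded payload */
		len -= TLV_ALIGN(hdr.length);
	}
	return len ? -1 : 0;		/* leftover bytes are an error */
}

int main(void)
{
	/* one record: type 6, alternative 0, 4 payload bytes */
	static const uint8_t fw[] = {
		0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
		0xc8, 0x00, 0x00, 0x00,
	};

	return parse_tlvs(fw, sizeof(fw)) ? 1 : 0;
}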
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 2a2c8de64a04..91f45e71e0a2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2009 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 9b212a8f30bb..06203d6a1d86 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2009 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -34,185 +34,254 @@
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
#endif
-#define PRIV_ENTRY __field(void *, priv)
-#define PRIV_ASSIGN __entry->priv = priv
+#define DEV_ENTRY __string(dev, dev_name(dev))
+#define DEV_ASSIGN __assign_str(dev, dev_name(dev))
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_io
TRACE_EVENT(iwlwifi_dev_ioread32,
- TP_PROTO(void *priv, u32 offs, u32 val),
- TP_ARGS(priv, offs, val),
+ TP_PROTO(const struct device *dev, u32 offs, u32 val),
+ TP_ARGS(dev, offs, val),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(u32, offs)
__field(u32, val)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->offs = offs;
__entry->val = val;
),
- TP_printk("[%p] read io[%#x] = %#x", __entry->priv, __entry->offs, __entry->val)
+ TP_printk("[%s] read io[%#x] = %#x",
+ __get_str(dev), __entry->offs, __entry->val)
);
TRACE_EVENT(iwlwifi_dev_iowrite8,
- TP_PROTO(void *priv, u32 offs, u8 val),
- TP_ARGS(priv, offs, val),
+ TP_PROTO(const struct device *dev, u32 offs, u8 val),
+ TP_ARGS(dev, offs, val),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(u32, offs)
__field(u8, val)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->offs = offs;
__entry->val = val;
),
- TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val)
+ TP_printk("[%s] write io[%#x] = %#x)",
+ __get_str(dev), __entry->offs, __entry->val)
);
TRACE_EVENT(iwlwifi_dev_iowrite32,
- TP_PROTO(void *priv, u32 offs, u32 val),
- TP_ARGS(priv, offs, val),
+ TP_PROTO(const struct device *dev, u32 offs, u32 val),
+ TP_ARGS(dev, offs, val),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(u32, offs)
__field(u32, val)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->offs = offs;
__entry->val = val;
),
- TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val)
+ TP_printk("[%s] write io[%#x] = %#x)",
+ __get_str(dev), __entry->offs, __entry->val)
);
TRACE_EVENT(iwlwifi_dev_irq,
- TP_PROTO(void *priv),
- TP_ARGS(priv),
+ TP_PROTO(const struct device *dev),
+ TP_ARGS(dev),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
),
/* TP_printk("") doesn't compile */
TP_printk("%d", 0)
);
TRACE_EVENT(iwlwifi_dev_ict_read,
- TP_PROTO(void *priv, u32 index, u32 value),
- TP_ARGS(priv, index, value),
+ TP_PROTO(const struct device *dev, u32 index, u32 value),
+ TP_ARGS(dev, index, value),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(u32, index)
__field(u32, value)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->index = index;
__entry->value = value;
),
- TP_printk("read ict[%d] = %#.8x", __entry->index, __entry->value)
+ TP_printk("[%s] read ict[%d] = %#.8x",
+ __get_str(dev), __entry->index, __entry->value)
);
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_ucode
TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
- TP_PROTO(void *priv, u32 time, u32 data, u32 ev),
- TP_ARGS(priv, time, data, ev),
+ TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev),
+ TP_ARGS(dev, time, data, ev),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(u32, time)
__field(u32, data)
__field(u32, ev)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->time = time;
__entry->data = data;
__entry->ev = ev;
),
- TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
- __entry->priv, __entry->time, __entry->data, __entry->ev)
+ TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u",
+ __get_str(dev), __entry->time, __entry->data, __entry->ev)
);
TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
- TP_PROTO(void *priv, u32 wraps, u32 n_entry, u32 p_entry),
- TP_ARGS(priv, wraps, n_entry, p_entry),
+ TP_PROTO(const struct device *dev, u32 wraps, u32 n_entry, u32 p_entry),
+ TP_ARGS(dev, wraps, n_entry, p_entry),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(u32, wraps)
__field(u32, n_entry)
__field(u32, p_entry)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->wraps = wraps;
__entry->n_entry = n_entry;
__entry->p_entry = p_entry;
),
- TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
- __entry->priv, __entry->wraps, __entry->n_entry,
+ TP_printk("[%s] wraps=#%02d n=0x%X p=0x%X",
+ __get_str(dev), __entry->wraps, __entry->n_entry,
__entry->p_entry)
);
#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_msg
+
+#define MAX_MSG_LEN 100
+
+DECLARE_EVENT_CLASS(iwlwifi_msg_event,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf),
+ TP_STRUCT__entry(
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+ TP_fast_assign(
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >= MAX_MSG_LEN);
+ ),
+ TP_printk("%s", (char *)__get_dynamic_array(msg))
+);
+
+DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_err,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_warn,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_info,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_crit,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+TRACE_EVENT(iwlwifi_dbg,
+ TP_PROTO(u32 level, bool in_interrupt, const char *function,
+ struct va_format *vaf),
+ TP_ARGS(level, in_interrupt, function, vaf),
+ TP_STRUCT__entry(
+ __field(u32, level)
+ __field(u8, in_interrupt)
+ __string(function, function)
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+ TP_fast_assign(
+ __entry->level = level;
+ __entry->in_interrupt = in_interrupt;
+ __assign_str(function, function);
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >= MAX_MSG_LEN);
+ ),
+ TP_printk("%s", (char *)__get_dynamic_array(msg))
+);
+
+#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi
TRACE_EVENT(iwlwifi_dev_hcmd,
- TP_PROTO(void *priv, u32 flags,
+ TP_PROTO(const struct device *dev, u32 flags,
const void *hcmd0, size_t len0,
const void *hcmd1, size_t len1,
const void *hcmd2, size_t len2),
- TP_ARGS(priv, flags, hcmd0, len0, hcmd1, len1, hcmd2, len2),
+ TP_ARGS(dev, flags, hcmd0, len0, hcmd1, len1, hcmd2, len2),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__dynamic_array(u8, hcmd0, len0)
__dynamic_array(u8, hcmd1, len1)
__dynamic_array(u8, hcmd2, len2)
__field(u32, flags)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
memcpy(__get_dynamic_array(hcmd0), hcmd0, len0);
memcpy(__get_dynamic_array(hcmd1), hcmd1, len1);
memcpy(__get_dynamic_array(hcmd2), hcmd2, len2);
__entry->flags = flags;
),
- TP_printk("[%p] hcmd %#.2x (%ssync)",
- __entry->priv, ((u8 *)__get_dynamic_array(hcmd0))[0],
+ TP_printk("[%s] hcmd %#.2x (%ssync)",
+ __get_str(dev), ((u8 *)__get_dynamic_array(hcmd0))[0],
__entry->flags & CMD_ASYNC ? "a" : "")
);
TRACE_EVENT(iwlwifi_dev_rx,
- TP_PROTO(void *priv, void *rxbuf, size_t len),
- TP_ARGS(priv, rxbuf, len),
+ TP_PROTO(const struct device *dev, void *rxbuf, size_t len),
+ TP_ARGS(dev, rxbuf, len),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__dynamic_array(u8, rxbuf, len)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
),
- TP_printk("[%p] RX cmd %#.2x",
- __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
+ TP_printk("[%s] RX cmd %#.2x",
+ __get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4])
);
TRACE_EVENT(iwlwifi_dev_tx,
- TP_PROTO(void *priv, void *tfd, size_t tfdlen,
+ TP_PROTO(const struct device *dev, void *tfd, size_t tfdlen,
void *buf0, size_t buf0_len,
void *buf1, size_t buf1_len),
- TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
+ TP_ARGS(dev, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(size_t, framelen)
__dynamic_array(u8, tfd, tfdlen)
@@ -226,29 +295,28 @@ TRACE_EVENT(iwlwifi_dev_tx,
__dynamic_array(u8, buf1, buf1_len)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->framelen = buf0_len + buf1_len;
memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
),
- TP_printk("[%p] TX %.2x (%zu bytes)",
- __entry->priv,
- ((u8 *)__get_dynamic_array(buf0))[0],
+ TP_printk("[%s] TX %.2x (%zu bytes)",
+ __get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],
__entry->framelen)
);
TRACE_EVENT(iwlwifi_dev_ucode_error,
- TP_PROTO(void *priv, u32 desc, u32 tsf_low,
+ TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low,
u32 data1, u32 data2, u32 line, u32 blink1,
u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver,
u32 brd_ver),
- TP_ARGS(priv, desc, tsf_low, data1, data2, line,
+ TP_ARGS(dev, desc, tsf_low, data1, data2, line,
blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2,
gp3, ucode_ver, hw_ver, brd_ver),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(u32, desc)
__field(u32, tsf_low)
__field(u32, data1)
@@ -267,7 +335,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
__field(u32, brd_ver)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->desc = desc;
__entry->tsf_low = tsf_low;
__entry->data1 = data1;
@@ -285,11 +353,11 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
__entry->hw_ver = hw_ver;
__entry->brd_ver = brd_ver;
),
- TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
+ TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, "
"blink 0x%05X 0x%05X ilink 0x%05X 0x%05X "
"bcon_tm %010u gp 0x%08X 0x%08X 0x%08X uCode 0x%08X "
"hw 0x%08X brd 0x%08X",
- __entry->priv, __entry->desc, __entry->tsf_low,
+ __get_str(dev), __entry->desc, __entry->tsf_low,
__entry->data1,
__entry->data2, __entry->line, __entry->blink1,
__entry->blink2, __entry->ilink1, __entry->ilink2,
@@ -299,23 +367,23 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
);
TRACE_EVENT(iwlwifi_dev_ucode_event,
- TP_PROTO(void *priv, u32 time, u32 data, u32 ev),
- TP_ARGS(priv, time, data, ev),
+ TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev),
+ TP_ARGS(dev, time, data, ev),
TP_STRUCT__entry(
- PRIV_ENTRY
+ DEV_ENTRY
__field(u32, time)
__field(u32, data)
__field(u32, ev)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ DEV_ASSIGN;
__entry->time = time;
__entry->data = data;
__entry->ev = ev;
),
- TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
- __entry->priv, __entry->time, __entry->data, __entry->ev)
+ TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u",
+ __get_str(dev), __entry->time, __entry->data, __entry->ev)
);
#endif /* __IWLWIFI_DEVICE_TRACE */
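When device tracing is compiled out, the stub macros added at the top of iwl-devtrace.h above turn every DECLARE_EVENT_CLASS/DEFINE_EVENT into an empty static inline trace_<name>(), so call sites still type-check but emit no code. The snippet below is a self-contained sketch of that trick with a made-up config symbol and event, not the kernel's tracepoint machinery:

/*
 * Sketch (hypothetical config symbol and event name) of the stub-macro
 * trick: when tracing is compiled out, each event expands to an empty
 * static inline trace_<name>() so the call sites still compile.
 */
#include <stdio.h>

/* #define MY_TRACING 1 */	/* uncomment to "enable" the event */

#ifdef MY_TRACING
#define MY_TRACE_EVENT(name, proto, arg)			\
	static void trace_ ## name(proto)			\
	{							\
		printf("trace " #name ": %d\n", (arg));		\
	}
#else
/* tracing disabled: keep the prototype, drop the body */
#define MY_TRACE_EVENT(name, proto, arg)			\
	static inline void trace_ ## name(proto) {}
#endif

MY_TRACE_EVENT(dev_irq, int irq, irq)

int main(void)
{
	trace_dev_irq(42);	/* no-op unless MY_TRACING is defined */
	return 0;
}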
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
new file mode 100644
index 000000000000..6f312c77af5e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -0,0 +1,993 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+
+#include "iwl-drv.h"
+#include "iwl-trans.h"
+#include "iwl-shared.h"
+#include "iwl-op-mode.h"
+#include "iwl-agn-hw.h"
+
+/* private includes */
+#include "iwl-fw-file.h"
+
+/**
+ * struct iwl_drv - drv common data
+ * @fw: the iwl_fw structure
+ * @shrd: pointer to common shared structure
+ * @op_mode: the running op_mode
+ * @fw_index: firmware revision to try loading
+ * @firmware_name: composite filename of ucode file to load
+ * @request_firmware_complete: the firmware has been obtained from user space
+ */
+struct iwl_drv {
+ struct iwl_fw fw;
+
+ struct iwl_shared *shrd;
+ struct iwl_op_mode *op_mode;
+
+ int fw_index; /* firmware we're trying to load */
+ char firmware_name[25]; /* name of firmware file to load */
+
+ struct completion request_firmware_complete;
+};
+
+
+
+/*
+ * struct fw_sec: Just for the image parsing process.
+ * For fw storage we use struct fw_desc.
+ */
+struct fw_sec {
+ const void *data; /* the sec data */
+ size_t size; /* section size */
+ u32 offset; /* offset of writing in the device */
+};
+
+static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
+{
+ if (desc->v_addr)
+ dma_free_coherent(trans(drv)->dev, desc->len,
+ desc->v_addr, desc->p_addr);
+ desc->v_addr = NULL;
+ desc->len = 0;
+}
+
+static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img)
+{
+ int i;
+ for (i = 0; i < IWL_UCODE_SECTION_MAX; i++)
+ iwl_free_fw_desc(drv, &img->sec[i]);
+}
+
+static void iwl_dealloc_ucode(struct iwl_drv *drv)
+{
+ int i;
+ for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
+ iwl_free_fw_img(drv, drv->fw.img + i);
+}
+
+static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
+ struct fw_sec *sec)
+{
+ if (!sec || !sec->size) {
+ desc->v_addr = NULL;
+ return -EINVAL;
+ }
+
+ desc->v_addr = dma_alloc_coherent(trans(drv)->dev, sec->size,
+ &desc->p_addr, GFP_KERNEL);
+ if (!desc->v_addr)
+ return -ENOMEM;
+
+ desc->len = sec->size;
+ desc->offset = sec->offset;
+ memcpy(desc->v_addr, sec->data, sec->size);
+ return 0;
+}
+
+static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
+
+#define UCODE_EXPERIMENTAL_INDEX 100
+#define UCODE_EXPERIMENTAL_TAG "exp"
+
+static int iwl_request_firmware(struct iwl_drv *drv, bool first)
+{
+ const struct iwl_cfg *cfg = cfg(drv);
+ const char *name_pre = cfg->fw_name_pre;
+ char tag[8];
+
+ if (first) {
+#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
+ drv->fw_index = UCODE_EXPERIMENTAL_INDEX;
+ strcpy(tag, UCODE_EXPERIMENTAL_TAG);
+ } else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
+#endif
+ drv->fw_index = cfg->ucode_api_max;
+ sprintf(tag, "%d", drv->fw_index);
+ } else {
+ drv->fw_index--;
+ sprintf(tag, "%d", drv->fw_index);
+ }
+
+ if (drv->fw_index < cfg->ucode_api_min) {
+ IWL_ERR(drv, "no suitable firmware found!\n");
+ return -ENOENT;
+ }
+
+ sprintf(drv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+
+ IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
+ (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
+ ? "EXPERIMENTAL " : "",
+ drv->firmware_name);
+
+ return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
+ trans(drv)->dev,
+ GFP_KERNEL, drv, iwl_ucode_callback);
+}
+
+struct fw_img_parsing {
+ struct fw_sec sec[IWL_UCODE_SECTION_MAX];
+ int sec_counter;
+};
+
+/*
+ * struct fw_sec_parsing: to extract fw section and its offset from tlv
+ */
+struct fw_sec_parsing {
+ __le32 offset;
+ const u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tlv_calib_data - parse the default calib data from TLV
+ *
+ * @ucode_type: the uCode to which the following default calib relates.
+ * @calib: default calibrations.
+ */
+struct iwl_tlv_calib_data {
+ __le32 ucode_type;
+ __le64 calib;
+} __packed;
+
+struct iwl_firmware_pieces {
+ struct fw_img_parsing img[IWL_UCODE_TYPE_MAX];
+
+ u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
+ u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
+};
+
+/*
+ * These functions are just to extract uCode section data from the pieces
+ * structure.
+ */
+static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces,
+ enum iwl_ucode_type type,
+ int sec)
+{
+ return &pieces->img[type].sec[sec];
+}
+
+static void set_sec_data(struct iwl_firmware_pieces *pieces,
+ enum iwl_ucode_type type,
+ int sec,
+ const void *data)
+{
+ pieces->img[type].sec[sec].data = data;
+}
+
+static void set_sec_size(struct iwl_firmware_pieces *pieces,
+ enum iwl_ucode_type type,
+ int sec,
+ size_t size)
+{
+ pieces->img[type].sec[sec].size = size;
+}
+
+static size_t get_sec_size(struct iwl_firmware_pieces *pieces,
+ enum iwl_ucode_type type,
+ int sec)
+{
+ return pieces->img[type].sec[sec].size;
+}
+
+static void set_sec_offset(struct iwl_firmware_pieces *pieces,
+ enum iwl_ucode_type type,
+ int sec,
+ u32 offset)
+{
+ pieces->img[type].sec[sec].offset = offset;
+}
+
+/*
+ * Gets uCode section from tlv.
+ */
+static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
+ const void *data, enum iwl_ucode_type type,
+ int size)
+{
+ struct fw_img_parsing *img;
+ struct fw_sec *sec;
+ struct fw_sec_parsing *sec_parse;
+
+ if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
+ return -1;
+
+ sec_parse = (struct fw_sec_parsing *)data;
+
+ img = &pieces->img[type];
+ sec = &img->sec[img->sec_counter];
+
+ sec->offset = le32_to_cpu(sec_parse->offset);
+ sec->data = sec_parse->data;
+
+ ++img->sec_counter;
+
+ return 0;
+}
+
+static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
+{
+ struct iwl_tlv_calib_data *def_calib =
+ (struct iwl_tlv_calib_data *)data;
+ u32 ucode_type = le32_to_cpu(def_calib->ucode_type);
+ if (ucode_type >= IWL_UCODE_TYPE_MAX) {
+ IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n",
+ ucode_type);
+ return -EINVAL;
+ }
+ drv->fw.default_calib[ucode_type] = le64_to_cpu(def_calib->calib);
+ return 0;
+}
+
+static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
+ const struct firmware *ucode_raw,
+ struct iwl_firmware_pieces *pieces)
+{
+ struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
+ u32 api_ver, hdr_size, build;
+ char buildstr[25];
+ const u8 *src;
+
+ drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
+ api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
+
+ switch (api_ver) {
+ default:
+ hdr_size = 28;
+ if (ucode_raw->size < hdr_size) {
+ IWL_ERR(drv, "File size too small!\n");
+ return -EINVAL;
+ }
+ build = le32_to_cpu(ucode->u.v2.build);
+ set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
+ le32_to_cpu(ucode->u.v2.inst_size));
+ set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
+ le32_to_cpu(ucode->u.v2.data_size));
+ set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
+ le32_to_cpu(ucode->u.v2.init_size));
+ set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
+ le32_to_cpu(ucode->u.v2.init_data_size));
+ src = ucode->u.v2.data;
+ break;
+ case 0:
+ case 1:
+ case 2:
+ hdr_size = 24;
+ if (ucode_raw->size < hdr_size) {
+ IWL_ERR(drv, "File size too small!\n");
+ return -EINVAL;
+ }
+ build = 0;
+ set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
+ le32_to_cpu(ucode->u.v1.inst_size));
+ set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
+ le32_to_cpu(ucode->u.v1.data_size));
+ set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
+ le32_to_cpu(ucode->u.v1.init_size));
+ set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
+ le32_to_cpu(ucode->u.v1.init_data_size));
+ src = ucode->u.v1.data;
+ break;
+ }
+
+ if (build)
+ sprintf(buildstr, " build %u%s", build,
+ (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
+ ? " (EXP)" : "");
+ else
+ buildstr[0] = '\0';
+
+ snprintf(drv->fw.fw_version,
+ sizeof(drv->fw.fw_version),
+ "%u.%u.%u.%u%s",
+ IWL_UCODE_MAJOR(drv->fw.ucode_ver),
+ IWL_UCODE_MINOR(drv->fw.ucode_ver),
+ IWL_UCODE_API(drv->fw.ucode_ver),
+ IWL_UCODE_SERIAL(drv->fw.ucode_ver),
+ buildstr);
+
+ /* Verify size of file vs. image size info in file's header */
+
+ if (ucode_raw->size != hdr_size +
+ get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) +
+ get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) +
+ get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) +
+ get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)) {
+
+ IWL_ERR(drv,
+ "uCode file size %d does not match expected size\n",
+ (int)ucode_raw->size);
+ return -EINVAL;
+ }
+
+
+ set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, src);
+ src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST);
+ set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
+ IWLAGN_RTC_INST_LOWER_BOUND);
+ set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, src);
+ src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA);
+ set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
+ IWLAGN_RTC_DATA_LOWER_BOUND);
+ set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, src);
+ src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST);
+ set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
+ IWLAGN_RTC_INST_LOWER_BOUND);
+ set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, src);
+ src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA);
+ set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
+ IWLAGN_RTC_DATA_LOWER_BOUND);
+ return 0;
+}
+
+static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
+ const struct firmware *ucode_raw,
+ struct iwl_firmware_pieces *pieces,
+ struct iwl_ucode_capabilities *capa)
+{
+ struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
+ struct iwl_ucode_tlv *tlv;
+ size_t len = ucode_raw->size;
+ const u8 *data;
+ int wanted_alternative = iwlagn_mod_params.wanted_ucode_alternative;
+ int tmp;
+ u64 alternatives;
+ u32 tlv_len;
+ enum iwl_ucode_tlv_type tlv_type;
+ const u8 *tlv_data;
+ char buildstr[25];
+ u32 build;
+
+ if (len < sizeof(*ucode)) {
+ IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
+ return -EINVAL;
+ }
+
+ if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) {
+ IWL_ERR(drv, "invalid uCode magic: 0X%x\n",
+ le32_to_cpu(ucode->magic));
+ return -EINVAL;
+ }
+
+ /*
+ * Check which alternatives are present, and "downgrade"
+ * when the chosen alternative is not present, warning
+ * the user when that happens. Some files may not have
+ * any alternatives, so don't warn in that case.
+ */
+ alternatives = le64_to_cpu(ucode->alternatives);
+ tmp = wanted_alternative;
+ if (wanted_alternative > 63)
+ wanted_alternative = 63;
+ while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
+ wanted_alternative--;
+ if (wanted_alternative && wanted_alternative != tmp)
+ IWL_WARN(drv,
+ "uCode alternative %d not available, choosing %d\n",
+ tmp, wanted_alternative);
+
+ drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
+ build = le32_to_cpu(ucode->build);
+
+ if (build)
+ sprintf(buildstr, " build %u%s", build,
+ (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
+ ? " (EXP)" : "");
+ else
+ buildstr[0] = '\0';
+
+ snprintf(drv->fw.fw_version,
+ sizeof(drv->fw.fw_version),
+ "%u.%u.%u.%u%s",
+ IWL_UCODE_MAJOR(drv->fw.ucode_ver),
+ IWL_UCODE_MINOR(drv->fw.ucode_ver),
+ IWL_UCODE_API(drv->fw.ucode_ver),
+ IWL_UCODE_SERIAL(drv->fw.ucode_ver),
+ buildstr);
+
+ data = ucode->data;
+
+ len -= sizeof(*ucode);
+
+ while (len >= sizeof(*tlv)) {
+ u16 tlv_alt;
+
+ len -= sizeof(*tlv);
+ tlv = (void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_alt = le16_to_cpu(tlv->alternative);
+ tlv_data = tlv->data;
+
+ if (len < tlv_len) {
+ IWL_ERR(drv, "invalid TLV len: %zd/%u\n",
+ len, tlv_len);
+ return -EINVAL;
+ }
+ len -= ALIGN(tlv_len, 4);
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+
+ /*
+ * Alternative 0 is always valid.
+ *
+ * Skip alternative TLVs that are not selected.
+ */
+ if (tlv_alt != 0 && tlv_alt != wanted_alternative)
+ continue;
+
+ switch (tlv_type) {
+ case IWL_UCODE_TLV_INST:
+ set_sec_data(pieces, IWL_UCODE_REGULAR,
+ IWL_UCODE_SECTION_INST, tlv_data);
+ set_sec_size(pieces, IWL_UCODE_REGULAR,
+ IWL_UCODE_SECTION_INST, tlv_len);
+ set_sec_offset(pieces, IWL_UCODE_REGULAR,
+ IWL_UCODE_SECTION_INST,
+ IWLAGN_RTC_INST_LOWER_BOUND);
+ break;
+ case IWL_UCODE_TLV_DATA:
+ set_sec_data(pieces, IWL_UCODE_REGULAR,
+ IWL_UCODE_SECTION_DATA, tlv_data);
+ set_sec_size(pieces, IWL_UCODE_REGULAR,
+ IWL_UCODE_SECTION_DATA, tlv_len);
+ set_sec_offset(pieces, IWL_UCODE_REGULAR,
+ IWL_UCODE_SECTION_DATA,
+ IWLAGN_RTC_DATA_LOWER_BOUND);
+ break;
+ case IWL_UCODE_TLV_INIT:
+ set_sec_data(pieces, IWL_UCODE_INIT,
+ IWL_UCODE_SECTION_INST, tlv_data);
+ set_sec_size(pieces, IWL_UCODE_INIT,
+ IWL_UCODE_SECTION_INST, tlv_len);
+ set_sec_offset(pieces, IWL_UCODE_INIT,
+ IWL_UCODE_SECTION_INST,
+ IWLAGN_RTC_INST_LOWER_BOUND);
+ break;
+ case IWL_UCODE_TLV_INIT_DATA:
+ set_sec_data(pieces, IWL_UCODE_INIT,
+ IWL_UCODE_SECTION_DATA, tlv_data);
+ set_sec_size(pieces, IWL_UCODE_INIT,
+ IWL_UCODE_SECTION_DATA, tlv_len);
+ set_sec_offset(pieces, IWL_UCODE_INIT,
+ IWL_UCODE_SECTION_DATA,
+ IWLAGN_RTC_DATA_LOWER_BOUND);
+ break;
+ case IWL_UCODE_TLV_BOOT:
+ IWL_ERR(drv, "Found unexpected BOOT ucode\n");
+ break;
+ case IWL_UCODE_TLV_PROBE_MAX_LEN:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ capa->max_probe_length =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_PAN:
+ if (tlv_len)
+ goto invalid_tlv_len;
+ capa->flags |= IWL_UCODE_TLV_FLAGS_PAN;
+ break;
+ case IWL_UCODE_TLV_FLAGS:
+ /* must be at least one u32 */
+ if (tlv_len < sizeof(u32))
+ goto invalid_tlv_len;
+ /* and a proper number of u32s */
+ if (tlv_len % sizeof(u32))
+ goto invalid_tlv_len;
+ /*
+ * This driver only reads the first u32 as
+ * right now no more features are defined,
+ * if that changes then either the driver
+ * will not work with the new firmware, or
+ * it'll not take advantage of new features.
+ */
+ capa->flags = le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ pieces->init_evtlog_ptr =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ pieces->init_evtlog_size =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ pieces->init_errlog_ptr =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ pieces->inst_evtlog_ptr =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ pieces->inst_evtlog_size =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ pieces->inst_errlog_ptr =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
+ if (tlv_len)
+ goto invalid_tlv_len;
+ drv->fw.enhance_sensitivity_table = true;
+ break;
+ case IWL_UCODE_TLV_WOWLAN_INST:
+ set_sec_data(pieces, IWL_UCODE_WOWLAN,
+ IWL_UCODE_SECTION_INST, tlv_data);
+ set_sec_size(pieces, IWL_UCODE_WOWLAN,
+ IWL_UCODE_SECTION_INST, tlv_len);
+ set_sec_offset(pieces, IWL_UCODE_WOWLAN,
+ IWL_UCODE_SECTION_INST,
+ IWLAGN_RTC_INST_LOWER_BOUND);
+ break;
+ case IWL_UCODE_TLV_WOWLAN_DATA:
+ set_sec_data(pieces, IWL_UCODE_WOWLAN,
+ IWL_UCODE_SECTION_DATA, tlv_data);
+ set_sec_size(pieces, IWL_UCODE_WOWLAN,
+ IWL_UCODE_SECTION_DATA, tlv_len);
+ set_sec_offset(pieces, IWL_UCODE_WOWLAN,
+ IWL_UCODE_SECTION_DATA,
+ IWLAGN_RTC_DATA_LOWER_BOUND);
+ break;
+ case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ capa->standard_phy_calibration_size =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_SEC_RT:
+ iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
+ tlv_len);
+ drv->fw.mvm_fw = true;
+ break;
+ case IWL_UCODE_TLV_SEC_INIT:
+ iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
+ tlv_len);
+ drv->fw.mvm_fw = true;
+ break;
+ case IWL_UCODE_TLV_SEC_WOWLAN:
+ iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
+ tlv_len);
+ drv->fw.mvm_fw = true;
+ break;
+ case IWL_UCODE_TLV_DEF_CALIB:
+ if (tlv_len != sizeof(struct iwl_tlv_calib_data))
+ goto invalid_tlv_len;
+ if (iwl_set_default_calib(drv, tlv_data))
+ goto tlv_error;
+ break;
+ case IWL_UCODE_TLV_PHY_SKU:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
+ break;
+ default:
+ IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
+ break;
+ }
+ }
+
+ if (len) {
+ IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
+ iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len);
+ return -EINVAL;
+ }
+
+ return 0;
+
+ invalid_tlv_len:
+ IWL_ERR(drv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len);
+ tlv_error:
+ iwl_print_hex_dump(drv, IWL_DL_FW, tlv_data, tlv_len);
+
+ return -EINVAL;
+}
+
+static int alloc_pci_desc(struct iwl_drv *drv,
+ struct iwl_firmware_pieces *pieces,
+ enum iwl_ucode_type type)
+{
+ int i;
+ for (i = 0;
+ i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i);
+ i++)
+ if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]),
+ get_sec(pieces, type, i)))
+ return -1;
+ return 0;
+}
+
+static int validate_sec_sizes(struct iwl_drv *drv,
+ struct iwl_firmware_pieces *pieces,
+ const struct iwl_cfg *cfg)
+{
+ IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %Zd\n",
+ get_sec_size(pieces, IWL_UCODE_REGULAR,
+ IWL_UCODE_SECTION_INST));
+ IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %Zd\n",
+ get_sec_size(pieces, IWL_UCODE_REGULAR,
+ IWL_UCODE_SECTION_DATA));
+ IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %Zd\n",
+ get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST));
+ IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %Zd\n",
+ get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA));
+
+ /* Verify that uCode images will fit in card's SRAM. */
+ if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) >
+ cfg->max_inst_size) {
+ IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n",
+ get_sec_size(pieces, IWL_UCODE_REGULAR,
+ IWL_UCODE_SECTION_INST));
+ return -1;
+ }
+
+ if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) >
+ cfg->max_data_size) {
+ IWL_ERR(drv, "uCode data len %Zd too large to fit in\n",
+ get_sec_size(pieces, IWL_UCODE_REGULAR,
+ IWL_UCODE_SECTION_DATA));
+ return -1;
+ }
+
+ if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) >
+ cfg->max_inst_size) {
+ IWL_ERR(drv, "uCode init instr len %Zd too large to fit in\n",
+ get_sec_size(pieces, IWL_UCODE_INIT,
+ IWL_UCODE_SECTION_INST));
+ return -1;
+ }
+
+ if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) >
+ cfg->max_data_size) {
+ IWL_ERR(drv, "uCode init data len %Zd too large to fit in\n",
+ get_sec_size(pieces, IWL_UCODE_INIT,
+ IWL_UCODE_SECTION_DATA));
+ return -1;
+ }
+ return 0;
+}
+
+
+/**
+ * iwl_ucode_callback - callback when firmware was loaded
+ *
+ * If loaded successfully, copies the firmware into buffers
+ * for the card to fetch (via DMA).
+ */
+static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
+{
+ struct iwl_drv *drv = context;
+ const struct iwl_cfg *cfg = cfg(drv);
+ struct iwl_fw *fw = &drv->fw;
+ struct iwl_ucode_header *ucode;
+ int err;
+ struct iwl_firmware_pieces pieces;
+ const unsigned int api_max = cfg->ucode_api_max;
+ unsigned int api_ok = cfg->ucode_api_ok;
+ const unsigned int api_min = cfg->ucode_api_min;
+ u32 api_ver;
+ int i;
+
+ fw->ucode_capa.max_probe_length = 200;
+ fw->ucode_capa.standard_phy_calibration_size =
+ IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+
+ if (!api_ok)
+ api_ok = api_max;
+
+ memset(&pieces, 0, sizeof(pieces));
+
+ if (!ucode_raw) {
+ if (drv->fw_index <= api_ok)
+ IWL_ERR(drv,
+ "request for firmware file '%s' failed.\n",
+ drv->firmware_name);
+ goto try_again;
+ }
+
+ IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
+ drv->firmware_name, ucode_raw->size);
+
+ /* Make sure that we got at least the API version number */
+ if (ucode_raw->size < 4) {
+ IWL_ERR(drv, "File size way too small!\n");
+ goto try_again;
+ }
+
+ /* Data from ucode file: header followed by uCode images */
+ ucode = (struct iwl_ucode_header *)ucode_raw->data;
+
+ if (ucode->ver)
+ err = iwl_parse_v1_v2_firmware(drv, ucode_raw, &pieces);
+ else
+ err = iwl_parse_tlv_firmware(drv, ucode_raw, &pieces,
+ &fw->ucode_capa);
+
+ if (err)
+ goto try_again;
+
+ api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
+
+ /*
+ * api_ver should match the api version forming part of the
+ * firmware filename ... but we don't check for that and only rely
+ * on the API version read from firmware header from here on forward
+ */
+ /* no api version check required for experimental uCode */
+ if (drv->fw_index != UCODE_EXPERIMENTAL_INDEX) {
+ if (api_ver < api_min || api_ver > api_max) {
+ IWL_ERR(drv,
+ "Driver unable to support your firmware API. "
+ "Driver supports v%u, firmware is v%u.\n",
+ api_max, api_ver);
+ goto try_again;
+ }
+
+ if (api_ver < api_ok) {
+ if (api_ok != api_max)
+ IWL_ERR(drv, "Firmware has old API version, "
+ "expected v%u through v%u, got v%u.\n",
+ api_ok, api_max, api_ver);
+ else
+ IWL_ERR(drv, "Firmware has old API version, "
+ "expected v%u, got v%u.\n",
+ api_max, api_ver);
+ IWL_ERR(drv, "New firmware can be obtained from "
+ "http://www.intellinuxwireless.org/.\n");
+ }
+ }
+
+ IWL_INFO(drv, "loaded firmware version %s", drv->fw.fw_version);
+
+ /*
+ * For any of the failures below (before allocating pci memory)
+ * we will try to load a version with a smaller API -- maybe the
+ * user just got a corrupted version of the latest API.
+ */
+
+ IWL_DEBUG_INFO(drv, "f/w package hdr ucode version raw = 0x%x\n",
+ drv->fw.ucode_ver);
+ IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %Zd\n",
+ get_sec_size(&pieces, IWL_UCODE_REGULAR,
+ IWL_UCODE_SECTION_INST));
+ IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %Zd\n",
+ get_sec_size(&pieces, IWL_UCODE_REGULAR,
+ IWL_UCODE_SECTION_DATA));
+ IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %Zd\n",
+ get_sec_size(&pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST));
+ IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %Zd\n",
+ get_sec_size(&pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA));
+
+ /* Verify that uCode images will fit in card's SRAM */
+ if (get_sec_size(&pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) >
+ cfg->max_inst_size) {
+ IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n",
+ get_sec_size(&pieces, IWL_UCODE_REGULAR,
+ IWL_UCODE_SECTION_INST));
+ goto try_again;
+ }
+
+ if (get_sec_size(&pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) >
+ cfg->max_data_size) {
+ IWL_ERR(drv, "uCode data len %Zd too large to fit in\n",
+ get_sec_size(&pieces, IWL_UCODE_REGULAR,
+ IWL_UCODE_SECTION_DATA));
+ goto try_again;
+ }
+
+ /*
+ * In mvm uCode there is no difference between data and instructions
+ * sections.
+ */
+ if (!fw->mvm_fw && validate_sec_sizes(drv, &pieces, cfg))
+ goto try_again;
+
+ /* Allocate ucode buffers for card's bus-master loading ... */
+
+ /* Runtime instructions and 2 copies of data:
+ * 1) unmodified from disk
+ * 2) backup cache for save/restore during power-downs */
+ for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
+ if (alloc_pci_desc(drv, &pieces, i))
+ goto err_pci_alloc;
+
+ /* Now that we can no longer fail, copy information */
+
+ /*
+ * The (size - 16) / 12 formula is based on the information recorded
+ * for each event, which is of mode 1 (including timestamp) for all
+ * new microcodes that include this information.
+ */
+ fw->init_evtlog_ptr = pieces.init_evtlog_ptr;
+ if (pieces.init_evtlog_size)
+ fw->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
+ else
+ fw->init_evtlog_size =
+ cfg->base_params->max_event_log_size;
+ fw->init_errlog_ptr = pieces.init_errlog_ptr;
+ fw->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
+ if (pieces.inst_evtlog_size)
+ fw->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
+ else
+ fw->inst_evtlog_size =
+ cfg->base_params->max_event_log_size;
+ fw->inst_errlog_ptr = pieces.inst_errlog_ptr;
+
+ /*
+ * figure out the offset of chain noise reset and gain commands
+ * based on the size of the standard phy calibration commands table
+ */
+ if (fw->ucode_capa.standard_phy_calibration_size >
+ IWL_MAX_PHY_CALIBRATE_TBL_SIZE)
+ fw->ucode_capa.standard_phy_calibration_size =
+ IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+
+ /* We have our copies now, allow the OS to release its copies */
+ release_firmware(ucode_raw);
+ complete(&drv->request_firmware_complete);
+
+ drv->op_mode = iwl_dvm_ops.start(drv->shrd->trans, &drv->fw);
+
+ if (!drv->op_mode)
+ goto out_unbind;
+
+ return;
+
+ try_again:
+ /* try next, if any */
+ release_firmware(ucode_raw);
+ if (iwl_request_firmware(drv, false))
+ goto out_unbind;
+ return;
+
+ err_pci_alloc:
+ IWL_ERR(drv, "failed to allocate pci memory\n");
+ iwl_dealloc_ucode(drv);
+ release_firmware(ucode_raw);
+ out_unbind:
+ complete(&drv->request_firmware_complete);
+ device_release_driver(trans(drv)->dev);
+}
+
+int iwl_drv_start(struct iwl_shared *shrd,
+ struct iwl_trans *trans, const struct iwl_cfg *cfg)
+{
+ struct iwl_drv *drv;
+ int ret;
+
+ shrd->cfg = cfg;
+
+ drv = kzalloc(sizeof(*drv), GFP_KERNEL);
+ if (!drv) {
+ dev_printk(KERN_ERR, trans->dev, "Couldn't allocate iwl_drv");
+ return -ENOMEM;
+ }
+ drv->shrd = shrd;
+ shrd->drv = drv;
+
+ init_completion(&drv->request_firmware_complete);
+
+ ret = iwl_request_firmware(drv, true);
+
+ if (ret) {
+ dev_printk(KERN_ERR, trans->dev, "Couldn't request the fw");
+ kfree(drv);
+ shrd->drv = NULL;
+ }
+
+ return ret;
+}
+
+void iwl_drv_stop(struct iwl_shared *shrd)
+{
+ struct iwl_drv *drv = shrd->drv;
+
+ wait_for_completion(&drv->request_firmware_complete);
+
+ /* op_mode can be NULL if its start failed */
+ if (drv->op_mode)
+ iwl_op_mode_stop(drv->op_mode);
+
+ iwl_dealloc_ucode(drv);
+
+ kfree(drv);
+ shrd->drv = NULL;
+}
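
The (size - 16) / 12 capacity calculation used for the event logs above assumes a 16-byte log header (four 32-bit words) followed by mode-1 records of three 32-bit words each (timestamp, event id, data). A minimal standalone sketch of that arithmetic, under those assumed record sizes; the helper name is illustrative and not part of the driver:

#include <stdio.h>

/* Assumed layout: 16-byte header (4 x u32), then 12-byte mode-1 records
 * (3 x u32: timestamp, event id, data). */
static unsigned int evtlog_capacity(unsigned int evtlog_size_bytes)
{
	if (evtlog_size_bytes <= 16)
		return 0;			/* header only, no records */
	return (evtlog_size_bytes - 16) / 12;	/* whole records that fit */
}

int main(void)
{
	/* e.g. a 2048-byte log area holds (2048 - 16) / 12 = 169 records */
	printf("%u\n", evtlog_capacity(2048));
	return 0;
}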
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
new file mode 100644
index 000000000000..3b771c1d9096
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -0,0 +1,123 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_drv_h__
+#define __iwl_drv_h__
+
+#include "iwl-shared.h"
+
+/**
+ * DOC: Driver system flows - drv component
+ *
+ * This component implements the system flows such as bus enumeration and bus
+ * removal. Bus dependent parts of system flows (such as iwl_pci_probe) are in
+ * bus specific files (transport files). This is the code that is common among
+ * different buses.
+ *
+ * This component is also in charge of managing the several implementations of
+ * the wifi flows: it allows having several fw API implementations. These
+ * different implementations will differ in the way they implement mac80211's
+ * handlers too.
+ *
+ * The init flow with respect to the drv component looks like this:
+ * 1) The bus specific component is called from module_init
+ * 2) The bus specific component registers the bus driver
+ * 3) The bus driver calls the probe function
+ * 4) The bus specific component configures the bus
+ * 5) The bus specific component calls to the drv bus agnostic part
+ * (iwl_drv_start)
+ * 6) iwl_drv_start requests the fw asynchronously; iwl_ucode_callback runs on completion
+ * 7) iwl_ucode_callback parses the fw file
+ * 8) iwl_ucode_callback starts the wifi implementation that matches the fw
+ */
+
+/**
+ * iwl_drv_start - start the drv
+ *
+ * @shrd: the shrd area
+ * @trans: the transport layer
+ * @cfg: device specific constants / virtual functions
+ *
+ * TODO: review the parameters given to this function
+ *
+ * starts the driver: fetches the firmware. This should be called by bus
+ * specific system flows implementations. For example, the bus specific probe
+ * function should do bus related operations only, and then call this
+ * function.
+ */
+int iwl_drv_start(struct iwl_shared *shrd,
+ struct iwl_trans *trans, const struct iwl_cfg *cfg);
+
+/**
+ * iwl_drv_stop - stop the drv
+ *
+ * @shrd: the shrd area
+ *
+ * TODO: review the parameters given to this function
+ *
+ * Stop the driver. This should be called by bus specific system flows
+ * implementations. For example, the bus specific remove function should first
+ * call this function and then do the bus related operations only.
+ */
+void iwl_drv_stop(struct iwl_shared *shrd);
+
+#endif /* __iwl_drv_h__ */
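
Putting the DOC flow above into code, a bus-specific probe does its bus-only setup and then hands over to iwl_drv_start(), while remove calls iwl_drv_stop() before tearing the bus down. A hedged sketch under those rules; the function names are illustrative and this is not the actual PCI glue:

#include "iwl-shared.h"
#include "iwl-drv.h"

/* Illustrative only: assumes the bus code has already built the shared
 * area and the transport, and picked a matching struct iwl_cfg. */
static int example_bus_probe(struct iwl_shared *shrd, struct iwl_trans *trans,
			     const struct iwl_cfg *cfg)
{
	/* bus-only setup (BARs, interrupts, ...) happens before this point */

	/* hand over to the bus-agnostic part; this requests the firmware
	 * asynchronously and iwl_ucode_callback() finishes the bring-up */
	return iwl_drv_start(shrd, trans, cfg);
}

static void example_bus_remove(struct iwl_shared *shrd)
{
	/* stop the driver first, then undo the bus-only setup */
	iwl_drv_stop(shrd);
}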
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index c1eda9724f42..23cea42b9495 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -75,6 +75,7 @@
#include "iwl-agn.h"
#include "iwl-eeprom.h"
#include "iwl-io.h"
+#include "iwl-prph.h"
/************************** EEPROM BANDS ****************************
*
@@ -149,23 +150,27 @@ static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
* EEPROM chip, not a single event, so even reads could conflict if they
* weren't arbitrated by the semaphore.
*/
-static int iwl_eeprom_acquire_semaphore(struct iwl_bus *bus)
+
+#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
+#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
+
+static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
{
u16 count;
int ret;
for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
/* Request semaphore */
- iwl_set_bit(bus, CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
/* See if we got it */
- ret = iwl_poll_bit(bus, CSR_HW_IF_CONFIG_REG,
+ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
EEPROM_SEM_TIMEOUT);
if (ret >= 0) {
- IWL_DEBUG_EEPROM(bus,
+ IWL_DEBUG_EEPROM(trans,
"Acquired semaphore after %d tries.\n",
count+1);
return ret;
@@ -175,16 +180,17 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_bus *bus)
return ret;
}
-static void iwl_eeprom_release_semaphore(struct iwl_bus *bus)
+static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
{
- iwl_clear_bit(bus, CSR_HW_IF_CONFIG_REG,
+ iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
}
static int iwl_eeprom_verify_signature(struct iwl_trans *trans)
{
- u32 gp = iwl_read32(bus(trans), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+ u32 gp = iwl_read32(trans, CSR_EEPROM_GP) &
+ CSR_EEPROM_GP_VALID_MSK;
int ret = 0;
IWL_DEBUG_EEPROM(trans, "EEPROM signature=0x%08x\n", gp);
@@ -247,46 +253,46 @@ err:
}
-int iwl_eeprom_check_sku(struct iwl_priv *priv)
+int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
{
struct iwl_shared *shrd = priv->shrd;
u16 radio_cfg;
- if (!cfg(priv)->sku) {
- /* not using sku overwrite */
- cfg(priv)->sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP);
- if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE &&
- !cfg(priv)->ht_params) {
- IWL_ERR(priv, "Invalid 11n configuration\n");
- return -EINVAL;
- }
+ hw_params(priv).sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP);
+ if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE &&
+ !cfg(priv)->ht_params) {
+ IWL_ERR(priv, "Invalid 11n configuration\n");
+ return -EINVAL;
}
- if (!cfg(priv)->sku) {
+
+ if (!hw_params(priv).sku) {
IWL_ERR(priv, "Invalid device sku\n");
return -EINVAL;
}
- IWL_INFO(priv, "Device SKU: 0x%X\n", cfg(priv)->sku);
-
- if (!cfg(priv)->valid_tx_ant && !cfg(priv)->valid_rx_ant) {
- /* not using .cfg overwrite */
- radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG);
- cfg(priv)->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
- cfg(priv)->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
- if (!cfg(priv)->valid_tx_ant || !cfg(priv)->valid_rx_ant) {
- IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n",
- cfg(priv)->valid_tx_ant,
- cfg(priv)->valid_rx_ant);
- return -EINVAL;
- }
- IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
- cfg(priv)->valid_tx_ant, cfg(priv)->valid_rx_ant);
+ IWL_INFO(priv, "Device SKU: 0x%X\n", hw_params(priv).sku);
+
+ radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG);
+
+ hw_params(priv).valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+ hw_params(priv).valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
+
+ /* check overrides (some devices have wrong EEPROM) */
+ if (cfg(priv)->valid_tx_ant)
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ if (cfg(priv)->valid_rx_ant)
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
+
+ if (!hw_params(priv).valid_tx_ant || !hw_params(priv).valid_rx_ant) {
+ IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n",
+ hw_params(priv).valid_tx_ant,
+ hw_params(priv).valid_rx_ant);
+ return -EINVAL;
}
- /*
- * for some special cases,
- * EEPROM did not reflect the correct antenna setting
- * so overwrite the valid tx/rx antenna from .cfg
- */
+
+ IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
+ hw_params(priv).valid_tx_ant, hw_params(priv).valid_rx_ant);
+
return 0;
}
@@ -303,19 +309,20 @@ void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac)
*
******************************************************************************/
-static void iwl_set_otp_access(struct iwl_bus *bus, enum iwl_access_mode mode)
+static void iwl_set_otp_access(struct iwl_trans *trans,
+ enum iwl_access_mode mode)
{
- iwl_read32(bus, CSR_OTP_GP_REG);
+ iwl_read32(trans, CSR_OTP_GP_REG);
if (mode == IWL_OTP_ACCESS_ABSOLUTE)
- iwl_clear_bit(bus, CSR_OTP_GP_REG,
+ iwl_clear_bit(trans, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
else
- iwl_set_bit(bus, CSR_OTP_GP_REG,
+ iwl_set_bit(trans, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
}
-static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev)
+static int iwl_get_nvm_type(struct iwl_trans *trans, u32 hw_rev)
{
u32 otpgp;
int nvm_type;
@@ -323,7 +330,7 @@ static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev)
/* OTP only valid for CP/PP and after */
switch (hw_rev & CSR_HW_REV_TYPE_MSK) {
case CSR_HW_REV_TYPE_NONE:
- IWL_ERR(bus, "Unknown hardware type\n");
+ IWL_ERR(trans, "Unknown hardware type\n");
return -ENOENT;
case CSR_HW_REV_TYPE_5300:
case CSR_HW_REV_TYPE_5350:
@@ -332,7 +339,7 @@ static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev)
nvm_type = NVM_DEVICE_TYPE_EEPROM;
break;
default:
- otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
+ otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
nvm_type = NVM_DEVICE_TYPE_OTP;
else
@@ -342,73 +349,74 @@ static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev)
return nvm_type;
}
-static int iwl_init_otp_access(struct iwl_bus *bus)
+static int iwl_init_otp_access(struct iwl_trans *trans)
{
int ret;
/* Enable 40MHz radio clock */
- iwl_write32(bus, CSR_GP_CNTRL,
- iwl_read32(bus, CSR_GP_CNTRL) |
+ iwl_write32(trans, CSR_GP_CNTRL,
+ iwl_read32(trans, CSR_GP_CNTRL) |
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/* wait for clock to be ready */
- ret = iwl_poll_bit(bus, CSR_GP_CNTRL,
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
25000);
if (ret < 0)
- IWL_ERR(bus, "Time out access OTP\n");
+ IWL_ERR(trans, "Time out access OTP\n");
else {
- iwl_set_bits_prph(bus, APMG_PS_CTRL_REG,
+ iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
udelay(5);
- iwl_clear_bits_prph(bus, APMG_PS_CTRL_REG,
+ iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
/*
* CSR auto clock gate disable bit -
* this is only applicable for HW with OTP shadow RAM
*/
- if (cfg(bus)->base_params->shadow_ram_support)
- iwl_set_bit(bus, CSR_DBG_LINK_PWR_MGMT_REG,
+ if (cfg(trans)->base_params->shadow_ram_support)
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
}
return ret;
}
-static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data)
+static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
+ __le16 *eeprom_data)
{
int ret = 0;
u32 r;
u32 otpgp;
- iwl_write32(bus, CSR_EEPROM_REG,
+ iwl_write32(trans, CSR_EEPROM_REG,
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
- ret = iwl_poll_bit(bus, CSR_EEPROM_REG,
+ ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
CSR_EEPROM_REG_READ_VALID_MSK,
CSR_EEPROM_REG_READ_VALID_MSK,
IWL_EEPROM_ACCESS_TIMEOUT);
if (ret < 0) {
- IWL_ERR(bus, "Time out reading OTP[%d]\n", addr);
+ IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
return ret;
}
- r = iwl_read32(bus, CSR_EEPROM_REG);
+ r = iwl_read32(trans, CSR_EEPROM_REG);
/* check for ECC errors: */
- otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
+ otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
/* stop in this case */
/* set the uncorrectable OTP ECC bit for acknowledgement */
- iwl_set_bit(bus, CSR_OTP_GP_REG,
+ iwl_set_bit(trans, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
- IWL_ERR(bus, "Uncorrectable OTP ECC error, abort OTP read\n");
+ IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
return -EINVAL;
}
if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
/* continue in this case */
/* set the correctable OTP ECC bit for acknowledgement */
- iwl_set_bit(bus, CSR_OTP_GP_REG,
+ iwl_set_bit(trans, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
- IWL_ERR(bus, "Correctable OTP ECC error, continue read\n");
+ IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
}
*eeprom_data = cpu_to_le16(r >> 16);
return 0;
@@ -417,20 +425,20 @@ static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data)
/*
* iwl_is_otp_empty: check for empty OTP
*/
-static bool iwl_is_otp_empty(struct iwl_bus *bus)
+static bool iwl_is_otp_empty(struct iwl_trans *trans)
{
u16 next_link_addr = 0;
__le16 link_value;
bool is_empty = false;
/* locate the beginning of OTP link list */
- if (!iwl_read_otp_word(bus, next_link_addr, &link_value)) {
+ if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
if (!link_value) {
- IWL_ERR(bus, "OTP is empty\n");
+ IWL_ERR(trans, "OTP is empty\n");
is_empty = true;
}
} else {
- IWL_ERR(bus, "Unable to read first block of OTP list.\n");
+ IWL_ERR(trans, "Unable to read first block of OTP list.\n");
is_empty = true;
}
@@ -447,7 +455,7 @@ static bool iwl_is_otp_empty(struct iwl_bus *bus)
* we should read and used to configure the device.
* only perform this operation if shadow RAM is disabled
*/
-static int iwl_find_otp_image(struct iwl_bus *bus,
+static int iwl_find_otp_image(struct iwl_trans *trans,
u16 *validblockaddr)
{
u16 next_link_addr = 0, valid_addr;
@@ -455,10 +463,10 @@ static int iwl_find_otp_image(struct iwl_bus *bus,
int usedblocks = 0;
/* set addressing mode to absolute to traverse the link list */
- iwl_set_otp_access(bus, IWL_OTP_ACCESS_ABSOLUTE);
+ iwl_set_otp_access(trans, IWL_OTP_ACCESS_ABSOLUTE);
/* checking for empty OTP or error */
- if (iwl_is_otp_empty(bus))
+ if (iwl_is_otp_empty(trans))
return -EINVAL;
/*
@@ -472,9 +480,9 @@ static int iwl_find_otp_image(struct iwl_bus *bus,
*/
valid_addr = next_link_addr;
next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
- IWL_DEBUG_EEPROM(bus, "OTP blocks %d addr 0x%x\n",
+ IWL_DEBUG_EEPROM(trans, "OTP blocks %d addr 0x%x\n",
usedblocks, next_link_addr);
- if (iwl_read_otp_word(bus, next_link_addr, &link_value))
+ if (iwl_read_otp_word(trans, next_link_addr, &link_value))
return -EINVAL;
if (!link_value) {
/*
@@ -489,10 +497,10 @@ static int iwl_find_otp_image(struct iwl_bus *bus,
}
/* more in the link list, continue */
usedblocks++;
- } while (usedblocks <= cfg(bus)->base_params->max_ll_items);
+ } while (usedblocks <= cfg(trans)->base_params->max_ll_items);
/* OTP has no valid blocks */
- IWL_DEBUG_EEPROM(bus, "OTP has no valid blocks\n");
+ IWL_DEBUG_EEPROM(trans, "OTP has no valid blocks\n");
return -EINVAL;
}
@@ -505,7 +513,7 @@ static int iwl_find_otp_image(struct iwl_bus *bus,
* iwl_get_max_txpower_avg - get the highest tx power from all chains.
* find the highest tx power from all chains for the channel
*/
-static s8 iwl_get_max_txpower_avg(struct iwl_cfg *cfg,
+static s8 iwl_get_max_txpower_avg(const struct iwl_cfg *cfg,
struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
int element, s8 *max_txpower_in_half_dbm)
{
@@ -581,7 +589,7 @@ iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \
? # x " " : "")
-void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
+static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
{
struct iwl_shared *shrd = priv->shrd;
struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
@@ -652,65 +660,62 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
*
* NOTE: This routine uses the non-debug IO access functions.
*/
-int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
+int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev)
{
- struct iwl_shared *shrd = priv->shrd;
__le16 *e;
- u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP);
+ u32 gp = iwl_read32(trans, CSR_EEPROM_GP);
int sz;
int ret;
u16 addr;
u16 validblockaddr = 0;
u16 cache_addr = 0;
- trans(priv)->nvm_device_type = iwl_get_nvm_type(bus(priv), hw_rev);
- if (trans(priv)->nvm_device_type == -ENOENT)
+ trans->nvm_device_type = iwl_get_nvm_type(trans, hw_rev);
+ if (trans->nvm_device_type == -ENOENT)
return -ENOENT;
/* allocate eeprom */
- sz = cfg(priv)->base_params->eeprom_size;
- IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
- shrd->eeprom = kzalloc(sz, GFP_KERNEL);
- if (!shrd->eeprom) {
+ sz = cfg(trans)->base_params->eeprom_size;
+ IWL_DEBUG_EEPROM(trans, "NVM size = %d\n", sz);
+ trans->shrd->eeprom = kzalloc(sz, GFP_KERNEL);
+ if (!trans->shrd->eeprom) {
ret = -ENOMEM;
goto alloc_err;
}
- e = (__le16 *)shrd->eeprom;
-
- iwl_apm_init(priv);
+ e = (__le16 *)trans->shrd->eeprom;
- ret = iwl_eeprom_verify_signature(trans(priv));
+ ret = iwl_eeprom_verify_signature(trans);
if (ret < 0) {
- IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+ IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
ret = -ENOENT;
goto err;
}
/* Make sure driver (instead of uCode) is allowed to read EEPROM */
- ret = iwl_eeprom_acquire_semaphore(bus(priv));
+ ret = iwl_eeprom_acquire_semaphore(trans);
if (ret < 0) {
- IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
+ IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n");
ret = -ENOENT;
goto err;
}
- if (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
+ if (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
- ret = iwl_init_otp_access(bus(priv));
+ ret = iwl_init_otp_access(trans);
if (ret) {
- IWL_ERR(priv, "Failed to initialize OTP access.\n");
+ IWL_ERR(trans, "Failed to initialize OTP access.\n");
ret = -ENOENT;
goto done;
}
- iwl_write32(bus(priv), CSR_EEPROM_GP,
- iwl_read32(bus(priv), CSR_EEPROM_GP) &
+ iwl_write32(trans, CSR_EEPROM_GP,
+ iwl_read32(trans, CSR_EEPROM_GP) &
~CSR_EEPROM_GP_IF_OWNER_MSK);
- iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
+ iwl_set_bit(trans, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
/* traversing the linked list if no shadow ram supported */
- if (!cfg(priv)->base_params->shadow_ram_support) {
- if (iwl_find_otp_image(bus(priv), &validblockaddr)) {
+ if (!cfg(trans)->base_params->shadow_ram_support) {
+ if (iwl_find_otp_image(trans, &validblockaddr)) {
ret = -ENOENT;
goto done;
}
@@ -719,7 +724,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
addr += sizeof(u16)) {
__le16 eeprom_data;
- ret = iwl_read_otp_word(bus(priv), addr, &eeprom_data);
+ ret = iwl_read_otp_word(trans, addr, &eeprom_data);
if (ret)
goto done;
e[cache_addr / 2] = eeprom_data;
@@ -730,36 +735,35 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
for (addr = 0; addr < sz; addr += sizeof(u16)) {
u32 r;
- iwl_write32(bus(priv), CSR_EEPROM_REG,
+ iwl_write32(trans, CSR_EEPROM_REG,
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
- ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
+ ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
CSR_EEPROM_REG_READ_VALID_MSK,
CSR_EEPROM_REG_READ_VALID_MSK,
IWL_EEPROM_ACCESS_TIMEOUT);
if (ret < 0) {
- IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr);
+ IWL_ERR(trans,
+ "Time out reading EEPROM[%d]\n", addr);
goto done;
}
- r = iwl_read32(bus(priv), CSR_EEPROM_REG);
+ r = iwl_read32(trans, CSR_EEPROM_REG);
e[addr / 2] = cpu_to_le16(r >> 16);
}
}
- IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
- (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+ IWL_DEBUG_EEPROM(trans, "NVM Type: %s, version: 0x%x\n",
+ (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP)
? "OTP" : "EEPROM",
- iwl_eeprom_query16(shrd, EEPROM_VERSION));
+ iwl_eeprom_query16(trans->shrd, EEPROM_VERSION));
ret = 0;
done:
- iwl_eeprom_release_semaphore(bus(priv));
+ iwl_eeprom_release_semaphore(trans);
err:
if (ret)
- iwl_eeprom_free(priv->shrd);
- /* Reset chip to save power until we load uCode during "up". */
- iwl_apm_stop(priv);
+ iwl_eeprom_free(trans->shrd);
alloc_err:
return ret;
}
@@ -1021,8 +1025,8 @@ int iwl_init_channel_map(struct iwl_priv *priv)
* driver need to process addition information
* to determine the max channel tx power limits
*/
- if (cfg(priv)->lib->eeprom_ops.update_enhanced_txpower)
- cfg(priv)->lib->eeprom_ops.update_enhanced_txpower(priv);
+ if (cfg(priv)->lib->eeprom_ops.enhanced_txpower)
+ iwl_eeprom_enhanced_txpower(priv);
return 0;
}
@@ -1072,7 +1076,7 @@ void iwl_rf_config(struct iwl_priv *priv)
/* write radio config values to register */
if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
- iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG,
EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
EEPROM_RF_CFG_DASH_MSK(radio_cfg));
@@ -1084,7 +1088,7 @@ void iwl_rf_config(struct iwl_priv *priv)
WARN_ON(1);
/* set CSR_HW_CONFIG_REG for uCode use */
- iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 9fa937ec35e3..e4a758340996 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -67,6 +67,7 @@
struct iwl_priv;
struct iwl_shared;
+struct iwl_trans;
/*
* EEPROM access time values:
@@ -301,14 +302,14 @@ extern const u8 iwl_eeprom_band_1[14];
struct iwl_eeprom_ops {
const u32 regulatory_bands[7];
- void (*update_enhanced_txpower) (struct iwl_priv *priv);
+ bool enhanced_txpower;
};
-int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
+int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev);
void iwl_eeprom_free(struct iwl_shared *shrd);
int iwl_eeprom_check_version(struct iwl_priv *priv);
-int iwl_eeprom_check_sku(struct iwl_priv *priv);
+int iwl_eeprom_init_hw_params(struct iwl_priv *priv);
const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset);
u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset);
int iwl_init_channel_map(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 5bede9d7f955..90208094b8eb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
new file mode 100644
index 000000000000..c924ccb93c8c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -0,0 +1,165 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_fw_file_h__
+#define __iwl_fw_file_h__
+
+#include <linux/netdevice.h>
+
+/* v1/v2 uCode file layout */
+struct iwl_ucode_header {
+ __le32 ver; /* major/minor/API/serial */
+ union {
+ struct {
+ __le32 inst_size; /* bytes of runtime code */
+ __le32 data_size; /* bytes of runtime data */
+ __le32 init_size; /* bytes of init code */
+ __le32 init_data_size; /* bytes of init data */
+ __le32 boot_size; /* bytes of bootstrap code */
+ u8 data[0]; /* in same order as sizes */
+ } v1;
+ struct {
+ __le32 build; /* build number */
+ __le32 inst_size; /* bytes of runtime code */
+ __le32 data_size; /* bytes of runtime data */
+ __le32 init_size; /* bytes of init code */
+ __le32 init_data_size; /* bytes of init data */
+ __le32 boot_size; /* bytes of bootstrap code */
+ u8 data[0]; /* in same order as sizes */
+ } v2;
+ } u;
+};
+
+/*
+ * new TLV uCode file layout
+ *
+ * The new TLV file format contains TLVs, each of which specifies
+ * some piece of data. To facilitate "groups", for example
+ * different instruction image with different capabilities,
+ * bundled with the same init image, an alternative mechanism
+ * is provided:
+ * When the alternative field is 0, that means that the item
+ * is always valid. When it is non-zero, then it is only
+ * valid in conjunction with items of the same alternative,
+ * in which case the driver (user) selects one alternative
+ * to use.
+ */
+
+enum iwl_ucode_tlv_type {
+ IWL_UCODE_TLV_INVALID = 0, /* unused */
+ IWL_UCODE_TLV_INST = 1,
+ IWL_UCODE_TLV_DATA = 2,
+ IWL_UCODE_TLV_INIT = 3,
+ IWL_UCODE_TLV_INIT_DATA = 4,
+ IWL_UCODE_TLV_BOOT = 5,
+ IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
+ IWL_UCODE_TLV_PAN = 7,
+ IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
+ IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
+ IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
+ IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11,
+ IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12,
+ IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13,
+ IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14,
+ IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
+ IWL_UCODE_TLV_WOWLAN_INST = 16,
+ IWL_UCODE_TLV_WOWLAN_DATA = 17,
+ IWL_UCODE_TLV_FLAGS = 18,
+ IWL_UCODE_TLV_SEC_RT = 19,
+ IWL_UCODE_TLV_SEC_INIT = 20,
+ IWL_UCODE_TLV_SEC_WOWLAN = 21,
+ IWL_UCODE_TLV_DEF_CALIB = 22,
+ IWL_UCODE_TLV_PHY_SKU = 23,
+};
+
+struct iwl_ucode_tlv {
+ __le16 type; /* see above */
+ __le16 alternative; /* see comment */
+ __le32 length; /* not including type/length fields */
+ u8 data[0];
+};
+
+#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
+
+struct iwl_tlv_ucode_header {
+ /*
+ * The TLV style ucode header is distinguished from
+ * the v1/v2 style header by its first four bytes being
+ * zero, since that is an invalid combination of
+ * major/minor/API/serial versions.
+ */
+ __le32 zero;
+ __le32 magic;
+ u8 human_readable[64];
+ __le32 ver; /* major/minor/API/serial */
+ __le32 build;
+ __le64 alternatives; /* bitmask of valid alternatives */
+ /*
+ * The data contained herein has a TLV layout,
+ * see above for the TLV header and types.
+ * Note that each TLV is padded to a length
+ * that is a multiple of 4 for alignment.
+ */
+ u8 data[0];
+};
+
+#endif /* __iwl_fw_file_h__ */
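
The comment above describes a stream of struct iwl_ucode_tlv entries, each padded to a multiple of 4 bytes, following struct iwl_tlv_ucode_header. A minimal sketch of walking such a stream, mirroring (not replacing) the loop in iwl_parse_tlv_firmware(); the callback type is a placeholder for per-type handling and the code relies on the usual kernel helpers (le16_to_cpu, le32_to_cpu, ALIGN, errno values):

typedef void (*tlv_handler_t)(u16 type, const u8 *data, u32 len);

/* Walk the TLV payload that follows the fixed header; 'len' is the number
 * of bytes remaining after struct iwl_tlv_ucode_header. */
static int example_walk_tlvs(const u8 *data, size_t len, tlv_handler_t handle)
{
	while (len >= sizeof(struct iwl_ucode_tlv)) {
		const struct iwl_ucode_tlv *tlv = (const void *)data;
		u32 tlv_len = le32_to_cpu(tlv->length);
		u32 padded_len = ALIGN(tlv_len, 4);	/* 4-byte padding */

		data += sizeof(*tlv);
		len -= sizeof(*tlv);
		if (len < padded_len)
			return -EINVAL;			/* truncated TLV */

		handle(le16_to_cpu(tlv->type), tlv->data, tlv_len);

		data += padded_len;
		len -= padded_len;
	}
	return len ? -EINVAL : 0;
}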
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
new file mode 100644
index 000000000000..8e36bdc1e522
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -0,0 +1,177 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_fw_h__
+#define __iwl_fw_h__
+#include <linux/types.h>
+
+/**
+ * enum iwl_ucode_tlv_flag - ucode API flags
+ * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
+ * was a separate TLV but moved here to save space.
+ * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
+ * treats good CRC threshold as a boolean
+ * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
+ * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
+ */
+enum iwl_ucode_tlv_flag {
+ IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
+ IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
+ IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
+ IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
+};
+
+/* The default calibrate table size if not specified by firmware file */
+#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
+#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
+#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253
+
+/**
+ * enum iwl_ucode_type
+ *
+ * The type of ucode.
+ *
+ * @IWL_UCODE_REGULAR: Normal runtime ucode
+ * @IWL_UCODE_INIT: Initial ucode
+ * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
+ */
+enum iwl_ucode_type {
+ IWL_UCODE_REGULAR,
+ IWL_UCODE_INIT,
+ IWL_UCODE_WOWLAN,
+ IWL_UCODE_TYPE_MAX,
+};
+
+/*
+ * enumeration of ucode section.
+ * This enumeration is used for legacy tlv style (before 16.0 uCode).
+ */
+enum iwl_ucode_sec {
+ IWL_UCODE_SECTION_INST,
+ IWL_UCODE_SECTION_DATA,
+};
+/*
+ * For 16.0 uCode and above, there is no differentiation between sections,
+ * just an offset to the HW address.
+ */
+#define IWL_UCODE_SECTION_MAX 4
+
+struct iwl_ucode_capabilities {
+ u32 max_probe_length;
+ u32 standard_phy_calibration_size;
+ u32 flags;
+};
+
+/* one for each uCode image (inst/data, init/runtime/wowlan) */
+struct fw_desc {
+ dma_addr_t p_addr; /* hardware address */
+ void *v_addr; /* software address */
+ u32 len; /* size in bytes */
+ u32 offset; /* offset in the device */
+};
+
+struct fw_img {
+ struct fw_desc sec[IWL_UCODE_SECTION_MAX];
+};
+
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
+#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
+#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
+#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
+
+/**
+ * struct iwl_fw - variables associated with the firmware
+ *
+ * @ucode_ver: ucode version from the ucode file
+ * @fw_version: firmware version string
+ * @img: ucode image like ucode_rt, ucode_init, ucode_wowlan.
+ * @ucode_capa: capabilities parsed from the ucode file.
+ * @enhance_sensitivity_table: device can do enhanced sensitivity.
+ * @init_evtlog_ptr: event log offset for init ucode.
+ * @init_evtlog_size: event log size for init ucode.
+ * @init_errlog_ptr: error log offset for init ucode.
+ * @inst_evtlog_ptr: event log offset for runtime ucode.
+ * @inst_evtlog_size: event log size for runtime ucode.
+ * @inst_errlog_ptr: error log offset for runtime ucode.
+ */
+struct iwl_fw {
+ u32 ucode_ver;
+
+ char fw_version[ETHTOOL_BUSINFO_LEN];
+
+ /* ucode images */
+ struct fw_img img[IWL_UCODE_TYPE_MAX];
+
+ struct iwl_ucode_capabilities ucode_capa;
+ bool enhance_sensitivity_table;
+
+ u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
+ u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
+
+ u64 default_calib[IWL_UCODE_TYPE_MAX];
+ u32 phy_config;
+
+ bool mvm_fw;
+};
+
+#endif /* __iwl_fw_h__ */
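
The IWL_UCODE_* macros above split the 32-bit version word into major, minor, API and serial bytes. A small illustrative example of decoding it (the helper is not part of the driver; a made-up version word 0x11223344 decodes as major 0x11, minor 0x22, API 0x33, serial 0x44):

/* Illustrative helper only. */
static void example_print_ucode_ver(const struct iwl_fw *fw)
{
	u32 ver = fw->ucode_ver;	/* e.g. 0x11223344 */

	pr_info("uCode %u.%u API %u serial %u\n",
		IWL_UCODE_MAJOR(ver), IWL_UCODE_MINOR(ver),
		IWL_UCODE_API(ver), IWL_UCODE_SERIAL(ver));
}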
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index d57ea6484bbe..081dd34d2387 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -34,41 +34,41 @@
#define IWL_POLL_INTERVAL 10 /* microseconds */
-static inline void __iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask)
+static inline void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
{
- iwl_write32(bus, reg, iwl_read32(bus, reg) | mask);
+ iwl_write32(trans, reg, iwl_read32(trans, reg) | mask);
}
-static inline void __iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask)
+static inline void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
{
- iwl_write32(bus, reg, iwl_read32(bus, reg) & ~mask);
+ iwl_write32(trans, reg, iwl_read32(trans, reg) & ~mask);
}
-void iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask)
+void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
{
unsigned long flags;
- spin_lock_irqsave(&bus->reg_lock, flags);
- __iwl_set_bit(bus, reg, mask);
- spin_unlock_irqrestore(&bus->reg_lock, flags);
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ __iwl_set_bit(trans, reg, mask);
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
}
-void iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask)
+void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
{
unsigned long flags;
- spin_lock_irqsave(&bus->reg_lock, flags);
- __iwl_clear_bit(bus, reg, mask);
- spin_unlock_irqrestore(&bus->reg_lock, flags);
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ __iwl_clear_bit(trans, reg, mask);
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
}
-int iwl_poll_bit(struct iwl_bus *bus, u32 addr,
+int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
{
int t = 0;
do {
- if ((iwl_read32(bus, addr) & mask) == (bits & mask))
+ if ((iwl_read32(trans, addr) & mask) == (bits & mask))
return t;
udelay(IWL_POLL_INTERVAL);
t += IWL_POLL_INTERVAL;
@@ -77,14 +77,15 @@ int iwl_poll_bit(struct iwl_bus *bus, u32 addr,
return -ETIMEDOUT;
}
-int iwl_grab_nic_access_silent(struct iwl_bus *bus)
+int iwl_grab_nic_access_silent(struct iwl_trans *trans)
{
int ret;
- lockdep_assert_held(&bus->reg_lock);
+ lockdep_assert_held(&trans->reg_lock);
/* this bit wakes up the NIC */
- __iwl_set_bit(bus, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ __iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
* These bits say the device is running, and should keep running for
@@ -105,70 +106,78 @@ int iwl_grab_nic_access_silent(struct iwl_bus *bus)
* 5000 series and later (including 1000 series) have non-volatile SRAM,
* and do not save/restore SRAM when power cycling.
*/
- ret = iwl_poll_bit(bus, CSR_GP_CNTRL,
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
(CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
if (ret < 0) {
- iwl_write32(bus, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
return -EIO;
}
return 0;
}
-int iwl_grab_nic_access(struct iwl_bus *bus)
+bool iwl_grab_nic_access(struct iwl_trans *trans)
{
- int ret = iwl_grab_nic_access_silent(bus);
- if (ret) {
- u32 val = iwl_read32(bus, CSR_GP_CNTRL);
- IWL_ERR(bus,
- "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
+ int ret = iwl_grab_nic_access_silent(trans);
+ if (unlikely(ret)) {
+ u32 val = iwl_read32(trans, CSR_GP_CNTRL);
+ WARN_ONCE(1, "Timeout waiting for hardware access "
+ "(CSR_GP_CNTRL 0x%08x)\n", val);
+ return false;
}
- return ret;
+ return true;
}
-void iwl_release_nic_access(struct iwl_bus *bus)
+void iwl_release_nic_access(struct iwl_trans *trans)
{
- lockdep_assert_held(&bus->reg_lock);
- __iwl_clear_bit(bus, CSR_GP_CNTRL,
+ lockdep_assert_held(&trans->reg_lock);
+ __iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ /*
+ * Above we read the CSR_GP_CNTRL register, which will flush
+ * any previous writes, but we need the write that clears the
+ * MAC_ACCESS_REQ bit to be performed before any other writes
+ * scheduled on different CPUs (after we drop reg_lock).
+ */
+ mmiowb();
}
-u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg)
+u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
u32 value;
unsigned long flags;
- spin_lock_irqsave(&bus->reg_lock, flags);
- iwl_grab_nic_access(bus);
- value = iwl_read32(bus, reg);
- iwl_release_nic_access(bus);
- spin_unlock_irqrestore(&bus->reg_lock, flags);
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ iwl_grab_nic_access(trans);
+ value = iwl_read32(trans, reg);
+ iwl_release_nic_access(trans);
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
return value;
}
-void iwl_write_direct32(struct iwl_bus *bus, u32 reg, u32 value)
+void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
{
unsigned long flags;
- spin_lock_irqsave(&bus->reg_lock, flags);
- if (!iwl_grab_nic_access(bus)) {
- iwl_write32(bus, reg, value);
- iwl_release_nic_access(bus);
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ if (likely(iwl_grab_nic_access(trans))) {
+ iwl_write32(trans, reg, value);
+ iwl_release_nic_access(trans);
}
- spin_unlock_irqrestore(&bus->reg_lock, flags);
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
}
-int iwl_poll_direct_bit(struct iwl_bus *bus, u32 addr, u32 mask,
+int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
int timeout)
{
int t = 0;
do {
- if ((iwl_read_direct32(bus, addr) & mask) == mask)
+ if ((iwl_read_direct32(trans, addr) & mask) == mask)
return t;
udelay(IWL_POLL_INTERVAL);
t += IWL_POLL_INTERVAL;
@@ -177,135 +186,132 @@ int iwl_poll_direct_bit(struct iwl_bus *bus, u32 addr, u32 mask,
return -ETIMEDOUT;
}
-static inline u32 __iwl_read_prph(struct iwl_bus *bus, u32 reg)
+static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 reg)
{
- iwl_write32(bus, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
- rmb();
- return iwl_read32(bus, HBUS_TARG_PRPH_RDAT);
+ iwl_write32(trans, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+ return iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
}
-static inline void __iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val)
+static inline void __iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
{
- iwl_write32(bus, HBUS_TARG_PRPH_WADDR,
+ iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
((addr & 0x0000FFFF) | (3 << 24)));
- wmb();
- iwl_write32(bus, HBUS_TARG_PRPH_WDAT, val);
+ iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
-u32 iwl_read_prph(struct iwl_bus *bus, u32 reg)
+u32 iwl_read_prph(struct iwl_trans *trans, u32 reg)
{
unsigned long flags;
u32 val;
- spin_lock_irqsave(&bus->reg_lock, flags);
- iwl_grab_nic_access(bus);
- val = __iwl_read_prph(bus, reg);
- iwl_release_nic_access(bus);
- spin_unlock_irqrestore(&bus->reg_lock, flags);
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ iwl_grab_nic_access(trans);
+ val = __iwl_read_prph(trans, reg);
+ iwl_release_nic_access(trans);
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
return val;
}
-void iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val)
+void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
{
unsigned long flags;
- spin_lock_irqsave(&bus->reg_lock, flags);
- if (!iwl_grab_nic_access(bus)) {
- __iwl_write_prph(bus, addr, val);
- iwl_release_nic_access(bus);
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ if (likely(iwl_grab_nic_access(trans))) {
+ __iwl_write_prph(trans, addr, val);
+ iwl_release_nic_access(trans);
}
- spin_unlock_irqrestore(&bus->reg_lock, flags);
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
}
-void iwl_set_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask)
+void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
{
unsigned long flags;
- spin_lock_irqsave(&bus->reg_lock, flags);
- iwl_grab_nic_access(bus);
- __iwl_write_prph(bus, reg, __iwl_read_prph(bus, reg) | mask);
- iwl_release_nic_access(bus);
- spin_unlock_irqrestore(&bus->reg_lock, flags);
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ if (likely(iwl_grab_nic_access(trans))) {
+ __iwl_write_prph(trans, reg,
+ __iwl_read_prph(trans, reg) | mask);
+ iwl_release_nic_access(trans);
+ }
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
}
-void iwl_set_bits_mask_prph(struct iwl_bus *bus, u32 reg,
+void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
u32 bits, u32 mask)
{
unsigned long flags;
- spin_lock_irqsave(&bus->reg_lock, flags);
- iwl_grab_nic_access(bus);
- __iwl_write_prph(bus, reg,
- (__iwl_read_prph(bus, reg) & mask) | bits);
- iwl_release_nic_access(bus);
- spin_unlock_irqrestore(&bus->reg_lock, flags);
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ if (likely(iwl_grab_nic_access(trans))) {
+ __iwl_write_prph(trans, reg,
+ (__iwl_read_prph(trans, reg) & mask) | bits);
+ iwl_release_nic_access(trans);
+ }
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
}
-void iwl_clear_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask)
+void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
{
unsigned long flags;
u32 val;
- spin_lock_irqsave(&bus->reg_lock, flags);
- iwl_grab_nic_access(bus);
- val = __iwl_read_prph(bus, reg);
- __iwl_write_prph(bus, reg, (val & ~mask));
- iwl_release_nic_access(bus);
- spin_unlock_irqrestore(&bus->reg_lock, flags);
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ if (likely(iwl_grab_nic_access(trans))) {
+ val = __iwl_read_prph(trans, reg);
+ __iwl_write_prph(trans, reg, (val & ~mask));
+ iwl_release_nic_access(trans);
+ }
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
}
-void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
+void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
void *buf, int words)
{
unsigned long flags;
int offs;
u32 *vals = buf;
- spin_lock_irqsave(&bus->reg_lock, flags);
- iwl_grab_nic_access(bus);
-
- iwl_write32(bus, HBUS_TARG_MEM_RADDR, addr);
- rmb();
-
- for (offs = 0; offs < words; offs++)
- vals[offs] = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
-
- iwl_release_nic_access(bus);
- spin_unlock_irqrestore(&bus->reg_lock, flags);
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ if (likely(iwl_grab_nic_access(trans))) {
+ iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
+ for (offs = 0; offs < words; offs++)
+ vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+ iwl_release_nic_access(trans);
+ }
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
}
-u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr)
+u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
{
u32 value;
- _iwl_read_targ_mem_words(bus, addr, &value, 1);
+ _iwl_read_targ_mem_words(trans, addr, &value, 1);
return value;
}
-int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr,
+int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
void *buf, int words)
{
unsigned long flags;
int offs, result = 0;
u32 *vals = buf;
- spin_lock_irqsave(&bus->reg_lock, flags);
- if (!iwl_grab_nic_access(bus)) {
- iwl_write32(bus, HBUS_TARG_MEM_WADDR, addr);
- wmb();
-
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ if (likely(iwl_grab_nic_access(trans))) {
+ iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
for (offs = 0; offs < words; offs++)
- iwl_write32(bus, HBUS_TARG_MEM_WDAT, vals[offs]);
- iwl_release_nic_access(bus);
+ iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
+ iwl_release_nic_access(trans);
} else
result = -EBUSY;
- spin_unlock_irqrestore(&bus->reg_lock, flags);
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
return result;
}
-int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val)
+int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val)
{
- return _iwl_write_targ_mem_words(bus, addr, &val, 1);
+ return _iwl_write_targ_mem_words(trans, addr, &val, 1);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index aae2eeb331a8..09b856768f62 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -31,63 +31,63 @@
#include "iwl-devtrace.h"
#include "iwl-shared.h"
-#include "iwl-bus.h"
+#include "iwl-trans.h"
-static inline void iwl_write8(struct iwl_bus *bus, u32 ofs, u8 val)
+static inline void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
- trace_iwlwifi_dev_iowrite8(priv(bus), ofs, val);
- bus_write8(bus, ofs, val);
+ trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val);
+ iwl_trans_write8(trans, ofs, val);
}
-static inline void iwl_write32(struct iwl_bus *bus, u32 ofs, u32 val)
+static inline void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
- trace_iwlwifi_dev_iowrite32(priv(bus), ofs, val);
- bus_write32(bus, ofs, val);
+ trace_iwlwifi_dev_iowrite32(trans->dev, ofs, val);
+ iwl_trans_write32(trans, ofs, val);
}
-static inline u32 iwl_read32(struct iwl_bus *bus, u32 ofs)
+static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
{
- u32 val = bus_read32(bus, ofs);
- trace_iwlwifi_dev_ioread32(priv(bus), ofs, val);
+ u32 val = iwl_trans_read32(trans, ofs);
+ trace_iwlwifi_dev_ioread32(trans->dev, ofs, val);
return val;
}
-void iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask);
-void iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask);
+void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
+void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
-int iwl_poll_bit(struct iwl_bus *bus, u32 addr,
+int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);
-int iwl_poll_direct_bit(struct iwl_bus *bus, u32 addr, u32 mask,
+int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
int timeout);
-int iwl_grab_nic_access_silent(struct iwl_bus *bus);
-int iwl_grab_nic_access(struct iwl_bus *bus);
-void iwl_release_nic_access(struct iwl_bus *bus);
+int iwl_grab_nic_access_silent(struct iwl_trans *trans);
+bool iwl_grab_nic_access(struct iwl_trans *trans);
+void iwl_release_nic_access(struct iwl_trans *trans);
-u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg);
-void iwl_write_direct32(struct iwl_bus *bus, u32 reg, u32 value);
+u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
+void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
-u32 iwl_read_prph(struct iwl_bus *bus, u32 reg);
-void iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val);
-void iwl_set_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask);
-void iwl_set_bits_mask_prph(struct iwl_bus *bus, u32 reg,
+u32 iwl_read_prph(struct iwl_trans *trans, u32 reg);
+void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val);
+void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask);
+void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
u32 bits, u32 mask);
-void iwl_clear_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask);
+void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask);
-void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
+void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
void *buf, int words);
-#define iwl_read_targ_mem_words(bus, addr, buf, bufsize) \
+#define iwl_read_targ_mem_words(trans, addr, buf, bufsize) \
do { \
BUILD_BUG_ON((bufsize) % sizeof(u32)); \
- _iwl_read_targ_mem_words(bus, addr, buf, \
+ _iwl_read_targ_mem_words(trans, addr, buf, \
(bufsize) / sizeof(u32));\
} while (0)
-int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr,
+int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
void *buf, int words);
-u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr);
-int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val);
+u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
+int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
#endif
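
The hunks above switch the low-level I/O helpers from struct iwl_bus to struct iwl_trans and make every periphery/target-memory access conditional on iwl_grab_nic_access() succeeding (note the return type change to bool). A minimal caller-side sketch of the reworked API follows; it is not part of the patch, and example_dump_sram, EXAMPLE_PRPH_REG and the 0x800000 address are illustrative names/values only.

/* Sketch: using the trans-based helpers declared in iwl-io.h above. */
#include "iwl-io.h"

#define EXAMPLE_PRPH_REG	0x0000a000	/* made-up periphery register */

static int example_dump_sram(struct iwl_trans *trans)
{
	u32 scratch[4];
	int ret;

	/* Single-word write; returns -EBUSY if NIC access can't be grabbed. */
	ret = iwl_write_targ_mem(trans, 0x800000, 0xdeadbeef);
	if (ret)
		return ret;

	/* Bulk read; the buffer size must be a multiple of sizeof(u32). */
	iwl_read_targ_mem_words(trans, 0x800000, scratch, sizeof(scratch));

	/* Periphery registers take the same trans handle. */
	iwl_set_bits_prph(trans, EXAMPLE_PRPH_REG, BIT(0));

	return 0;
}
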
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 14dcbfcdc0fd..1993a2b7ae63 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -71,7 +71,7 @@ static const struct ieee80211_tpt_blink iwl_blink[] = {
/* Set led register off */
void iwlagn_led_enable(struct iwl_priv *priv)
{
- iwl_write32(bus(priv), CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+ iwl_write32(trans(priv), CSR_LED_REG, CSR_LED_REG_TRUN_ON);
}
/*
@@ -107,11 +107,12 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
};
u32 reg;
- reg = iwl_read32(bus(priv), CSR_LED_REG);
+ reg = iwl_read32(trans(priv), CSR_LED_REG);
if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
- iwl_write32(bus(priv), CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
+ iwl_write32(trans(priv), CSR_LED_REG,
+ reg & CSR_LED_BSM_CTRL_MSK);
- return iwl_trans_send_cmd(trans(priv), &cmd);
+ return iwl_dvm_send_cmd(priv, &cmd);
}
/* Set led pattern command */
@@ -125,7 +126,7 @@ static int iwl_led_cmd(struct iwl_priv *priv,
};
int ret;
- if (!test_bit(STATUS_READY, &priv->shrd->status))
+ if (!test_bit(STATUS_READY, &priv->status))
return -EBUSY;
if (priv->blink_on == on && priv->blink_off == off)
@@ -177,6 +178,10 @@ void iwl_leds_init(struct iwl_priv *priv)
int mode = iwlagn_mod_params.led_mode;
int ret;
+ if (mode == IWL_LED_DISABLE) {
+ IWL_INFO(priv, "Led disabled\n");
+ return;
+ }
if (mode == IWL_LED_DEFAULT)
mode = cfg(priv)->led_mode;
@@ -202,7 +207,7 @@ void iwl_leds_init(struct iwl_priv *priv)
break;
}
- ret = led_classdev_register(bus(priv)->dev, &priv->led);
+ ret = led_classdev_register(trans(priv)->dev, &priv->led);
if (ret) {
kfree(priv->led.name);
return;
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 2550b3c7dcbf..b02a853103d3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
index f980e574e1f9..b6805f8e9a01 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -35,7 +35,6 @@
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
@@ -44,15 +43,14 @@
#include <asm/div64.h>
#include "iwl-eeprom.h"
-#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-agn-calib.h"
#include "iwl-agn.h"
#include "iwl-shared.h"
-#include "iwl-bus.h"
#include "iwl-trans.h"
+#include "iwl-op-mode.h"
/*****************************************************************************
*
@@ -136,7 +134,7 @@ iwlagn_iface_combinations_p2p[] = {
* other mac80211 functions grouped here.
*/
int iwlagn_mac_setup_register(struct iwl_priv *priv,
- struct iwlagn_ucode_capabilities *capa)
+ const struct iwl_ucode_capabilities *capa)
{
int ret;
struct ieee80211_hw *hw = priv->hw;
@@ -161,11 +159,14 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
hw->flags |= IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
- if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+#ifndef CONFIG_IWLWIFI_EXPERIMENTAL_MFP
+ /* enable 11w if the uCode advertise */
if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
+#endif /* !CONFIG_IWLWIFI_EXPERIMENTAL_MFP */
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
hw->sta_data_size = sizeof(struct iwl_station_priv);
@@ -195,8 +196,9 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
WIPHY_FLAG_DISABLE_BEACON_HINTS |
WIPHY_FLAG_IBSS_RSN;
- if (trans(priv)->ucode_wowlan.code.len &&
- device_can_wakeup(bus(priv)->dev)) {
+ if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+ trans(priv)->ops->wowlan_suspend &&
+ device_can_wakeup(trans(priv)->dev)) {
hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_EAP_IDENTITY_REQ |
@@ -234,7 +236,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&priv->bands[IEEE80211_BAND_5GHZ];
- hw->wiphy->hw_version = bus_get_hw_id(bus(priv));
+ hw->wiphy->hw_version = trans(priv)->hw_id;
iwl_leds_init(priv);
@@ -262,9 +264,9 @@ static int __iwl_up(struct iwl_priv *priv)
struct iwl_rxon_context *ctx;
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
return -EIO;
}
@@ -277,13 +279,13 @@ static int __iwl_up(struct iwl_priv *priv)
}
}
- ret = iwl_run_init_ucode(trans(priv));
+ ret = iwl_run_init_ucode(priv);
if (ret) {
IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
goto error;
}
- ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_REGULAR);
+ ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
if (ret) {
IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
goto error;
@@ -295,9 +297,9 @@ static int __iwl_up(struct iwl_priv *priv)
return 0;
error:
- set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
- __iwl_down(priv);
- clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+ set_bit(STATUS_EXIT_PENDING, &priv->status);
+ iwl_down(priv);
+ clear_bit(STATUS_EXIT_PENDING, &priv->status);
IWL_ERR(priv, "Unable to initialize device.\n");
return ret;
@@ -305,22 +307,22 @@ static int __iwl_up(struct iwl_priv *priv)
static int iwlagn_mac_start(struct ieee80211_hw *hw)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int ret;
IWL_DEBUG_MAC80211(priv, "enter\n");
/* we should be verifying the device is ready to be opened */
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
ret = __iwl_up(priv);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
if (ret)
return ret;
IWL_DEBUG_INFO(priv, "Start UP work done.\n");
/* Now we should be done, and the READY bit should be set. */
- if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status)))
+ if (WARN_ON(!test_bit(STATUS_READY, &priv->status)))
ret = -EIO;
iwlagn_led_enable(priv);
@@ -332,7 +334,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
static void iwlagn_mac_stop(struct ieee80211_hw *hw)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -341,14 +343,19 @@ static void iwlagn_mac_stop(struct ieee80211_hw *hw)
priv->is_open = 0;
+ mutex_lock(&priv->mutex);
iwl_down(priv);
+ mutex_unlock(&priv->mutex);
+
+ iwl_cancel_deferred_work(priv);
- flush_workqueue(priv->shrd->workqueue);
+ flush_workqueue(priv->workqueue);
/* User space software may expect getting rfkill changes
- * even if interface is down */
- iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF);
- iwl_enable_rfkill_int(priv);
+ * even if interface is down, trans->down will leave the RF
+ * kill interrupt enabled
+ */
+ iwl_trans_stop_hw(trans(priv));
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -357,13 +364,13 @@ static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_gtk_rekey_data *data)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
if (iwlagn_mod_params.sw_crypto)
return;
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
goto out;
@@ -375,7 +382,7 @@ static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
priv->have_rekey_data = true;
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -384,7 +391,7 @@ static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
int ret;
@@ -392,7 +399,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
return -EINVAL;
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
/* Don't attempt WoWLAN when not associated, tear down instead. */
if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
@@ -401,24 +408,22 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
goto out;
}
- ret = iwlagn_suspend(priv, hw, wowlan);
+ ret = iwlagn_suspend(priv, wowlan);
if (ret)
goto error;
- device_set_wakeup_enable(bus(priv)->dev, true);
+ device_set_wakeup_enable(trans(priv)->dev, true);
- /* Now let the ucode operate on its own */
- iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+ iwl_trans_wowlan_suspend(trans(priv));
goto out;
error:
- priv->shrd->wowlan = false;
+ priv->wowlan = false;
iwlagn_prepare_restart(priv);
ieee80211_restart_hw(priv->hw);
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
@@ -426,42 +431,45 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
static int iwlagn_mac_resume(struct ieee80211_hw *hw)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct ieee80211_vif *vif;
unsigned long flags;
u32 base, status = 0xffffffff;
int ret = -EIO;
+ const struct fw_img *img;
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
- iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
+ iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
base = priv->shrd->device_pointers.error_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
- spin_lock_irqsave(&bus(priv)->reg_lock, flags);
- ret = iwl_grab_nic_access_silent(bus(priv));
- if (ret == 0) {
- iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base);
- status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
- iwl_release_nic_access(bus(priv));
+ spin_lock_irqsave(&trans(priv)->reg_lock, flags);
+ ret = iwl_grab_nic_access_silent(trans(priv));
+ if (likely(ret == 0)) {
+ iwl_write32(trans(priv), HBUS_TARG_MEM_RADDR, base);
+ status = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
+ iwl_release_nic_access(trans(priv));
}
- spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
+ spin_unlock_irqrestore(&trans(priv)->reg_lock, flags);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (ret == 0) {
- struct iwl_trans *trans = trans(priv);
- if (!priv->wowlan_sram)
+ img = &(priv->fw->img[IWL_UCODE_WOWLAN]);
+ if (!priv->wowlan_sram) {
priv->wowlan_sram =
- kzalloc(trans->ucode_wowlan.data.len,
+ kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len,
GFP_KERNEL);
+ }
if (priv->wowlan_sram)
_iwl_read_targ_mem_words(
- bus(priv), 0x800000, priv->wowlan_sram,
- trans->ucode_wowlan.data.len / 4);
+ trans(priv), 0x800000,
+ priv->wowlan_sram,
+ img->sec[IWL_UCODE_SECTION_DATA].len / 4);
}
#endif
}
@@ -469,9 +477,9 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
/* we'll clear ctx->vif during iwlagn_prepare_restart() */
vif = ctx->vif;
- priv->shrd->wowlan = false;
+ priv->wowlan = false;
- device_set_wakeup_enable(bus(priv)->dev, false);
+ device_set_wakeup_enable(trans(priv)->dev, false);
iwlagn_prepare_restart(priv);
@@ -479,7 +487,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
iwl_connection_init_rx_config(priv, ctx);
iwlagn_set_rxon_chain(priv, ctx);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
ieee80211_resume_disconnect(vif);
@@ -491,7 +499,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
@@ -506,7 +514,7 @@ static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u32 iv32, u16 *phase1key)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
}
@@ -516,7 +524,7 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
struct iwl_rxon_context *ctx = vif_priv->ctx;
int ret;
@@ -557,7 +565,7 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
return 0;
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_scan_cancel_timeout(priv, 100);
BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
@@ -608,7 +616,7 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = -EINVAL;
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
@@ -620,18 +628,18 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u16 tid, u16 *ssn,
u8 buf_size)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int ret = -EINVAL;
struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
sta->addr, tid);
- if (!(cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE))
+ if (!(hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE))
return -EACCES;
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
switch (action) {
case IEEE80211_AMPDU_RX_START:
@@ -643,8 +651,6 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_RX_STOP:
IWL_DEBUG_HT(priv, "stop Rx\n");
ret = iwl_sta_rx_agg_stop(priv, sta, tid);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
- ret = 0;
break;
case IEEE80211_AMPDU_TX_START:
if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
@@ -660,10 +666,8 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
priv->agg_tids_count);
}
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
- ret = 0;
- if (!priv->agg_tids_count && cfg(priv)->ht_params &&
- cfg(priv)->ht_params->use_rts_for_aggregation) {
+ if (!priv->agg_tids_count &&
+ hw_params(priv).use_rts_for_aggregation) {
/*
* switch off RTS/CTS if it was previously enabled
*/
@@ -677,7 +681,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
ret = iwlagn_tx_agg_oper(priv, vif, sta, tid, buf_size);
break;
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
}
@@ -686,16 +690,13 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
bool is_ap = vif->type == NL80211_IFTYPE_STATION;
- int ret = 0;
+ int ret;
u8 sta_id;
- IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n",
- sta->addr);
- mutex_lock(&priv->shrd->mutex);
IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
sta->addr);
sta_priv->sta_id = IWL_INVALID_STATION;
@@ -710,17 +711,119 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
IWL_ERR(priv, "Unable to add station %pM (%d)\n",
sta->addr, ret);
/* Should we return success if return code is EEXIST ? */
- goto out;
+ return ret;
}
sta_priv->sta_id = sta_id;
- /* Initialize rate scaling */
- IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
- sta->addr);
- iwl_rs_rate_init(priv, sta, sta_id);
- out:
- mutex_unlock(&priv->shrd->mutex);
+ return 0;
+}
+
+static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ int ret;
+
+ IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", sta->addr);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ /*
+ * Station will be removed from device when the RXON
+ * is set to unassociated -- just deactivate it here
+ * to avoid re-programming it.
+ */
+ ret = 0;
+ iwl_deactivate_station(priv, sta_priv->sta_id, sta->addr);
+ } else {
+ ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
+ if (ret)
+ IWL_DEBUG_QUIET_RFKILL(priv,
+ "Error removing station %pM\n", sta->addr);
+ }
+ return ret;
+}
+
+static int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ enum {
+ NONE, ADD, REMOVE, HT_RATE_INIT, ADD_RATE_INIT,
+ } op = NONE;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "station %pM state change %d->%d\n",
+ sta->addr, old_state, new_state);
+
+ mutex_lock(&priv->mutex);
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE)
+ op = ADD;
+ else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)
+ op = REMOVE;
+ else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC)
+ op = HT_RATE_INIT;
+ } else {
+ if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC)
+ op = ADD_RATE_INIT;
+ else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH)
+ op = REMOVE;
+ }
+
+ switch (op) {
+ case ADD:
+ ret = iwlagn_mac_sta_add(hw, vif, sta);
+ break;
+ case REMOVE:
+ ret = iwlagn_mac_sta_remove(hw, vif, sta);
+ break;
+ case ADD_RATE_INIT:
+ ret = iwlagn_mac_sta_add(hw, vif, sta);
+ if (ret)
+ break;
+ /* Initialize rate scaling */
+ IWL_DEBUG_INFO(priv,
+ "Initializing rate scaling for station %pM\n",
+ sta->addr);
+ iwl_rs_rate_init(priv, sta, iwl_sta_id(sta));
+ ret = 0;
+ break;
+ case HT_RATE_INIT:
+ /* Initialize rate scaling */
+ ret = iwl_sta_update_ht(priv, vif_priv->ctx, sta);
+ if (ret)
+ break;
+ IWL_DEBUG_INFO(priv,
+ "Initializing rate scaling for station %pM\n",
+ sta->addr);
+ iwl_rs_rate_init(priv, sta, iwl_sta_id(sta));
+ ret = 0;
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+
+ /*
+ * mac80211 might WARN if we fail, but due to the way we
+ * (badly) handle hard rfkill, we might fail here
+ */
+ if (iwl_is_rfkill(priv))
+ ret = 0;
+
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
@@ -729,7 +832,7 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_channel_switch *ch_switch)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
const struct iwl_channel_info *ch_info;
struct ieee80211_conf *conf = &hw->conf;
struct ieee80211_channel *channel = ch_switch->channel;
@@ -747,14 +850,14 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
- if (iwl_is_rfkill(priv->shrd))
+ if (iwl_is_rfkill(priv))
goto out;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
- test_bit(STATUS_SCANNING, &priv->shrd->status) ||
- test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+ test_bit(STATUS_SCANNING, &priv->status) ||
+ test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
goto out;
if (!iwl_is_associated_ctx(ctx))
@@ -773,8 +876,6 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
goto out;
}
- spin_lock_irq(&priv->shrd->lock);
-
priv->current_ht_config.smps = conf->smps_mode;
/* Configure HT40 channels */
@@ -791,23 +892,21 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
iwl_set_rxon_ht(priv, ht_conf);
iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
- spin_unlock_irq(&priv->shrd->lock);
-
iwl_set_rate(priv);
/*
* at this point, staging_rxon has the
* configuration for channel switch
*/
- set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
+ set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
priv->switch_channel = cpu_to_le16(ch);
if (cfg(priv)->lib->set_channel_switch(priv, ch_switch)) {
- clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
+ clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
priv->switch_channel = 0;
ieee80211_chswitch_done(ctx->vif, false);
}
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -816,7 +915,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
unsigned int *total_flags,
u64 multicast)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
__le32 filter_or = 0, filter_nand = 0;
struct iwl_rxon_context *ctx;
@@ -837,7 +936,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
#undef CHK
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
for_each_context(priv, ctx) {
ctx->staging.filter_flags &= ~filter_nand;
@@ -849,7 +948,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
*/
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
/*
* Receiving all multicast frames is always enabled by the
@@ -863,16 +962,16 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "enter\n");
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
goto done;
}
- if (iwl_is_rfkill(priv->shrd)) {
+ if (iwl_is_rfkill(priv)) {
IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
goto done;
}
@@ -891,7 +990,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
iwl_trans_wait_tx_queue_empty(trans(priv));
done:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -900,7 +999,7 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
enum nl80211_channel_type channel_type,
int duration)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
int err = 0;
@@ -911,9 +1010,9 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
return -EOPNOTSUPP;
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
- if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
+ if (test_bit(STATUS_SCAN_HW, &priv->status)) {
err = -EBUSY;
goto out;
}
@@ -982,7 +1081,7 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
iwlagn_disable_roc(priv);
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return err;
@@ -990,108 +1089,28 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
return -EOPNOTSUPP;
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
iwlagn_disable_roc(priv);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return 0;
}
-static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const u8 *bssid,
- enum ieee80211_tx_sync_type type)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
- int ret;
- u8 sta_id;
-
- if (ctx->ctxid != IWL_RXON_CTX_PAN)
- return 0;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (iwl_is_associated_ctx(ctx)) {
- ret = 0;
- goto out;
- }
-
- if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW,
- &priv->shrd->status)) {
- ret = -EBUSY;
- goto out;
- }
-
- ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id);
- if (ret)
- goto out;
-
- if (WARN_ON(sta_id != ctx->ap_sta_id)) {
- ret = -EIO;
- goto out_remove_sta;
- }
-
- memcpy(ctx->bssid, bssid, ETH_ALEN);
- ctx->preauth_bssid = true;
-
- ret = iwlagn_commit_rxon(priv, ctx);
-
- if (ret == 0)
- goto out;
-
- out_remove_sta:
- iwl_remove_station(priv, sta_id, bssid);
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const u8 *bssid,
- enum ieee80211_tx_sync_type type)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
-
- if (ctx->ctxid != IWL_RXON_CTX_PAN)
- return;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (iwl_is_associated_ctx(ctx))
- goto out;
-
- iwl_remove_station(priv, ctx->ap_sta_id, bssid);
- ctx->preauth_bssid = false;
- /* no need to commit */
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
enum ieee80211_rssi_event rssi_event)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if (cfg(priv)->bt_params &&
cfg(priv)->bt_params->advanced_bt_coexist) {
@@ -1106,16 +1125,16 @@ static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
"ignoring RSSI callback\n");
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, bool set)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- queue_work(priv->shrd->workqueue, &priv->beacon_update);
+ queue_work(priv->workqueue, &priv->beacon_update);
return 0;
}
@@ -1124,10 +1143,9 @@ static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
struct iwl_rxon_context *ctx = vif_priv->ctx;
- unsigned long flags;
int q;
if (WARN_ON(!ctx))
@@ -1135,7 +1153,7 @@ static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "enter\n");
- if (!iwl_is_ready_rf(priv->shrd)) {
+ if (!iwl_is_ready_rf(priv)) {
IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
return -EIO;
}
@@ -1147,7 +1165,7 @@ static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
q = AC_NUM - 1 - queue;
- spin_lock_irqsave(&priv->shrd->lock, flags);
+ mutex_lock(&priv->mutex);
ctx->qos_data.def_qos_parm.ac[q].cw_min =
cpu_to_le16(params->cw_min);
@@ -1159,7 +1177,7 @@ static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return 0;
@@ -1167,7 +1185,7 @@ static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
return priv->ibss_manager == IWL_IBSS_MANAGER;
}
@@ -1187,7 +1205,7 @@ static int iwl_setup_interface(struct iwl_priv *priv,
struct ieee80211_vif *vif = ctx->vif;
int err;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
/*
* This variable will be correct only when there's just
@@ -1221,7 +1239,7 @@ static int iwl_setup_interface(struct iwl_priv *priv,
static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
struct iwl_rxon_context *tmp, *ctx = NULL;
int err;
@@ -1232,11 +1250,11 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
cancel_delayed_work_sync(&priv->hw_roc_disable_work);
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwlagn_disable_roc(priv);
- if (!iwl_is_ready_rf(priv->shrd)) {
+ if (!iwl_is_ready_rf(priv)) {
IWL_WARN(priv, "Try to add interface when device not ready\n");
err = -EINVAL;
goto out;
@@ -1279,7 +1297,7 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
ctx->vif = NULL;
priv->iw_mode = NL80211_IFTYPE_STATION;
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return err;
@@ -1291,7 +1309,7 @@ static void iwl_teardown_interface(struct iwl_priv *priv,
{
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
if (priv->scan_vif == vif) {
iwl_scan_cancel_timeout(priv, 200);
@@ -1318,12 +1336,12 @@ static void iwl_teardown_interface(struct iwl_priv *priv,
static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if (WARN_ON(ctx->vif != vif)) {
struct iwl_rxon_context *tmp;
@@ -1336,7 +1354,7 @@ static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
iwl_teardown_interface(priv, vif, false);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -1346,7 +1364,7 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum nl80211_iftype newtype, bool newp2p)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_rxon_context *tmp;
@@ -1358,9 +1376,9 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
newtype = ieee80211_iftype_p2p(newtype, newp2p);
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
- if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
+ if (!ctx->vif || !iwl_is_ready_rf(priv)) {
/*
* Huh? But wait ... this can maybe happen when
* we're in the middle of a firmware restart!
@@ -1422,7 +1440,7 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
err = 0;
out:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return err;
@@ -1432,7 +1450,7 @@ static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int ret;
IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -1440,7 +1458,7 @@ static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
if (req->n_channels == 0)
return -EINVAL;
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
/*
* If an internal scan is in progress, just set
@@ -1469,47 +1487,20 @@ static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "leave\n");
- mutex_unlock(&priv->shrd->mutex);
-
- return ret;
-}
-
-static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter: received request to remove "
- "station %pM\n", sta->addr);
- mutex_lock(&priv->shrd->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
- sta->addr);
- ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
- if (ret)
- IWL_DEBUG_QUIET_RFKILL(priv, "Error removing station %pM\n",
- sta->addr);
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
+ mutex_unlock(&priv->mutex);
return ret;
}
static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.sta.modify_mask = 0;
- priv->stations[sta_id].sta.sleep_tx_count = 0;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ struct iwl_addsta_cmd cmd = {
+ .mode = STA_CONTROL_MODIFY_MSK,
+ .station_flags_msk = STA_FLG_PWR_SAVE_MSK,
+ .sta.sta_id = sta_id,
+ };
+ iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
}
static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
@@ -1517,7 +1508,7 @@ static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
enum sta_notify_cmd cmd,
struct ieee80211_sta *sta)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
int sta_id;
@@ -1566,8 +1557,7 @@ struct ieee80211_ops iwlagn_hw_ops = {
.ampdu_action = iwlagn_mac_ampdu_action,
.hw_scan = iwlagn_mac_hw_scan,
.sta_notify = iwlagn_mac_sta_notify,
- .sta_add = iwlagn_mac_sta_add,
- .sta_remove = iwlagn_mac_sta_remove,
+ .sta_state = iwlagn_mac_sta_state,
.channel_switch = iwlagn_mac_channel_switch,
.flush = iwlagn_mac_flush,
.tx_last_beacon = iwlagn_mac_tx_last_beacon,
@@ -1576,8 +1566,6 @@ struct ieee80211_ops iwlagn_hw_ops = {
.rssi_callback = iwlagn_mac_rssi_callback,
CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
- .tx_sync = iwlagn_mac_tx_sync,
- .finish_tx_sync = iwlagn_mac_finish_tx_sync,
.set_tim = iwlagn_mac_set_tim,
};
@@ -1585,15 +1573,18 @@ struct ieee80211_ops iwlagn_hw_ops = {
struct ieee80211_hw *iwl_alloc_all(void)
{
struct iwl_priv *priv;
+ struct iwl_op_mode *op_mode;
/* mac80211 allocates memory for this device instance, including
* space for this driver's private structure */
struct ieee80211_hw *hw;
- hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops);
+ hw = ieee80211_alloc_hw(sizeof(struct iwl_priv) +
+ sizeof(struct iwl_op_mode), &iwlagn_hw_ops);
if (!hw)
goto out;
- priv = hw->priv;
+ op_mode = hw->priv;
+ priv = IWL_OP_MODE_GET_DVM(op_mode);
priv->hw = hw;
out:
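
The iwl_alloc_all() change above makes mac80211 allocate room for a struct iwl_op_mode followed by the DVM private data, and every mac80211 callback now fetches its iwl_priv through IWL_MAC80211_GET_DVM(hw) instead of reading hw->priv directly. Those accessor macros are not shown in this diff; given the op_mode_specific[] trailing member introduced in iwl-op-mode.h below, they presumably amount to something like the following reconstruction (for illustration only, not the committed definitions):

/* hw->priv points at the embedded struct iwl_op_mode; the DVM private
 * data sits in its op_mode_specific[] tail storage. */
#define IWL_OP_MODE_GET_DVM(_op_mode) \
	((struct iwl_priv *)(void *)((_op_mode)->op_mode_specific))

#define IWL_MAC80211_GET_DVM(_hw) \
	IWL_OP_MODE_GET_DVM((struct iwl_op_mode *)((_hw)->priv))
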
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
new file mode 100644
index 000000000000..88dc4a0f96b4
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -0,0 +1,157 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/sched.h>
+
+#include "iwl-notif-wait.h"
+
+
+void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
+{
+ spin_lock_init(&notif_wait->notif_wait_lock);
+ INIT_LIST_HEAD(&notif_wait->notif_waits);
+ init_waitqueue_head(&notif_wait->notif_waitq);
+}
+
+void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt)
+{
+ if (!list_empty(&notif_wait->notif_waits)) {
+ struct iwl_notification_wait *w;
+
+ spin_lock(&notif_wait->notif_wait_lock);
+ list_for_each_entry(w, &notif_wait->notif_waits, list) {
+ if (w->cmd != pkt->hdr.cmd)
+ continue;
+ w->triggered = true;
+ if (w->fn)
+ w->fn(notif_wait, pkt, w->fn_data);
+ }
+ spin_unlock(&notif_wait->notif_wait_lock);
+
+ wake_up_all(&notif_wait->notif_waitq);
+ }
+}
+
+void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
+{
+ unsigned long flags;
+ struct iwl_notification_wait *wait_entry;
+
+ spin_lock_irqsave(&notif_wait->notif_wait_lock, flags);
+ list_for_each_entry(wait_entry, &notif_wait->notif_waits, list)
+ wait_entry->aborted = true;
+ spin_unlock_irqrestore(&notif_wait->notif_wait_lock, flags);
+
+ wake_up_all(&notif_wait->notif_waitq);
+}
+
+
+void
+iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_notification_wait *wait_entry,
+ u8 cmd,
+ void (*fn)(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data),
+ void *fn_data)
+{
+ wait_entry->fn = fn;
+ wait_entry->fn_data = fn_data;
+ wait_entry->cmd = cmd;
+ wait_entry->triggered = false;
+ wait_entry->aborted = false;
+
+ spin_lock_bh(&notif_wait->notif_wait_lock);
+ list_add(&wait_entry->list, &notif_wait->notif_waits);
+ spin_unlock_bh(&notif_wait->notif_wait_lock);
+}
+
+int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout)
+{
+ int ret;
+
+ ret = wait_event_timeout(notif_wait->notif_waitq,
+ wait_entry->triggered || wait_entry->aborted,
+ timeout);
+
+ spin_lock_bh(&notif_wait->notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&notif_wait->notif_wait_lock);
+
+ if (wait_entry->aborted)
+ return -EIO;
+
+ /* return value is always >= 0 */
+ if (ret <= 0)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_notification_wait *wait_entry)
+{
+ spin_lock_bh(&notif_wait->notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&notif_wait->notif_wait_lock);
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-wifi.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
index 18501101a530..5e8af957aa7b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-wifi.h
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -41,7 +41,6 @@
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
@@ -58,17 +57,73 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
*****************************************************************************/
+#ifndef __iwl_notif_wait_h__
+#define __iwl_notif_wait_h__
+
+#include <linux/wait.h>
+
+#include "iwl-trans.h"
+
+struct iwl_notif_wait_data {
+ struct list_head notif_waits;
+ spinlock_t notif_wait_lock;
+ wait_queue_head_t notif_waitq;
+};
+
+/**
+ * struct iwl_notification_wait - notification wait entry
+ * @list: list head for global list
+ * @fn: function called with the notification
+ * @cmd: command ID
+ *
+ * This structure is not used directly; to wait for a
+ * notification, declare it on the stack and call
+ * iwl_init_notification_wait() with the appropriate
+ * parameters. Then do whatever will cause the ucode
+ * to notify the driver, and wait for that notification
+ * by calling iwl_wait_notification().
+ *
+ * Each notification is one-shot. If at some point we
+ * need to support multi-shot notifications (which
+ * can't be allocated on the stack) we need to modify
+ * the code for them.
+ */
+struct iwl_notification_wait {
+ struct list_head list;
+
+ void (*fn)(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt, void *data);
+ void *fn_data;
+
+ u8 cmd;
+ bool triggered, aborted;
+};
+
+
+/* caller functions */
+void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data);
+void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt);
+void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
+
+/* user functions */
+void __acquires(wait_entry)
+iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
+ struct iwl_notification_wait *wait_entry,
+ u8 cmd,
+ void (*fn)(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt, void *data),
+ void *fn_data);
-#ifndef __iwl_wifi_h__
-#define __iwl_wifi_h__
+int __must_check __releases(wait_entry)
+iwl_wait_notification(struct iwl_notif_wait_data *notif_data,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout);
-#include "iwl-shared.h"
+void __releases(wait_entry)
+iwl_remove_notification(struct iwl_notif_wait_data *notif_data,
+ struct iwl_notification_wait *wait_entry);
-int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type);
-void iwl_send_prio_tbl(struct iwl_trans *trans);
-int iwl_init_alive_start(struct iwl_trans *trans);
-int iwl_run_init_ucode(struct iwl_trans *trans);
-int iwl_load_ucode_wait_alive(struct iwl_trans *trans,
- enum iwl_ucode_type ucode_type);
-#endif /* __iwl_wifi_h__ */
+#endif /* __iwl_notif_wait_h__ */
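
The kerneldoc above spells out the intended usage: register a stack-allocated wait entry before poking the ucode, then either wait for the notification or remove the entry on the error path. A short sketch of that flow follows; it is not part of the patch, example_send_calib_cmd is a hypothetical trigger, and the command ID and timeout are illustrative.

/* Sketch of the notification-wait API introduced above. */
static int example_wait_for_calib(struct iwl_trans *trans,
				  struct iwl_notif_wait_data *notif_wait)
{
	struct iwl_notification_wait calib_wait;
	int ret;

	/* Register interest first so the notification can't race the wait. */
	iwl_init_notification_wait(notif_wait, &calib_wait,
				   CALIBRATION_COMPLETE_NOTIFICATION,
				   NULL, NULL);

	ret = example_send_calib_cmd(trans);	/* hypothetical trigger */
	if (ret) {
		/* We never waited, so the entry must be removed by hand. */
		iwl_remove_notification(notif_wait, &calib_wait);
		return ret;
	}

	/* Returns 0, -ETIMEDOUT, or -EIO if the waits were aborted;
	 * iwl_wait_notification() removes the entry in every case. */
	return iwl_wait_notification(notif_wait, &calib_wait, 2 * HZ);
}
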
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
new file mode 100644
index 000000000000..6ea4163ff56a
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -0,0 +1,216 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_op_mode_h__
+#define __iwl_op_mode_h__
+
+struct iwl_op_mode;
+struct iwl_trans;
+struct sk_buff;
+struct iwl_device_cmd;
+struct iwl_rx_cmd_buffer;
+struct iwl_fw;
+
+/**
+ * DOC: Operational mode - what is it ?
+ *
+ * The operational mode (a.k.a. op_mode) is the layer that implements
+ * mac80211's handlers. It knows two APIs: mac80211's and the fw's. It uses
+ * the transport API to access the HW. The op_mode doesn't need to know how the
+ * underlying HW works, since the transport layer takes care of that.
+ *
+ * There can be several op_modes: e.g. different fw APIs will require two
+ * different op_modes. This is why the op_mode is virtualized.
+ */
+
+/**
+ * DOC: Life cycle of the Operational mode
+ *
+ * The operational mode has a very simple life cycle.
+ *
+ * 1) The driver layer (iwl-drv.c) chooses the op_mode based on the
+ * capabilities advertised by the fw file (in TLV format).
+ * 2) The driver layer starts the op_mode (ops->start)
+ * 3) The op_mode registers with mac80211
+ * 4) The op_mode is governed by mac80211
+ * 5) The driver layer stops the op_mode
+ */
+
+/**
+ * struct iwl_op_mode_ops - op_mode specific operations
+ *
+ * The op_mode exports its ops so that external components can start it and
+ * interact with it. The driver layer typically calls the start and stop
+ * handlers; the transport layer calls the others.
+ *
+ * All the handlers MUST be implemented
+ *
+ * @start: start the op_mode. The transport layer is already allocated.
+ * May sleep
+ * @stop: stop the op_mode. Must free all the memory allocated.
+ * May sleep
+ * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
+ * HCMD this Rx responds to.
+ * Must be atomic.
+ * @queue_full: notifies that a HW queue is full. Ac is the ac of the queue
+ * Must be atomic
+ * @queue_not_full: notifies that a HW queue is not full any more.
+ * Ac is the ac of the queue. Must be atomic
+ * @hw_rf_kill: notifies of a change in the HW RF kill switch. True means that
+ * the radio is killed. Must be atomic.
+ * @free_skb: allows the transport layer to free skbs that haven't been
+ * reclaimed by the op_mode. This can happen when the driver is freed and
+ * there are Tx packets pending in the transport layer.
+ * Must be atomic
+ * @nic_error: error notification. Must be atomic
+ * @cmd_queue_full: Called when the command queue gets full. Must be atomic.
+ * @nic_config: configure NIC, called before firmware is started.
+ * May sleep
+ */
+struct iwl_op_mode_ops {
+ struct iwl_op_mode *(*start)(struct iwl_trans *trans,
+ const struct iwl_fw *fw);
+ void (*stop)(struct iwl_op_mode *op_mode);
+ int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+ void (*queue_full)(struct iwl_op_mode *op_mode, u8 ac);
+ void (*queue_not_full)(struct iwl_op_mode *op_mode, u8 ac);
+ void (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
+ void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
+ void (*nic_error)(struct iwl_op_mode *op_mode);
+ void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
+ void (*nic_config)(struct iwl_op_mode *op_mode);
+};
+
+/**
+ * struct iwl_op_mode - operational mode
+ *
+ * This holds an implementation of the mac80211 / fw API.
+ *
+ * @ops - pointer to its own ops
+ */
+struct iwl_op_mode {
+ const struct iwl_op_mode_ops *ops;
+ const struct iwl_trans *trans;
+
+ char op_mode_specific[0] __aligned(sizeof(void *));
+};
+
+static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
+{
+ might_sleep();
+
+ op_mode->ops->stop(op_mode);
+}
+
+static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ return op_mode->ops->rx(op_mode, rxb, cmd);
+}
+
+static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode, u8 ac)
+{
+ op_mode->ops->queue_full(op_mode, ac);
+}
+
+static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
+ u8 ac)
+{
+ op_mode->ops->queue_not_full(op_mode, ac);
+}
+
+static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
+ bool state)
+{
+ op_mode->ops->hw_rf_kill(op_mode, state);
+}
+
+static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
+ struct sk_buff *skb)
+{
+ op_mode->ops->free_skb(op_mode, skb);
+}
+
+static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode)
+{
+ op_mode->ops->nic_error(op_mode);
+}
+
+static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
+{
+ op_mode->ops->cmd_queue_full(op_mode);
+}
+
+static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
+{
+ might_sleep();
+ op_mode->ops->nic_config(op_mode);
+}
+
+/*****************************************************
+* Op mode layers implementations
+******************************************************/
+extern const struct iwl_op_mode_ops iwl_dvm_ops;
+
+#endif /* __iwl_op_mode_h__ */
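As a rough sketch (not part of the patch) of the life cycle documented above: the driver layer picks an ops table, starts the op_mode, and later stops it. Only iwl_dvm_ops and the iwl_op_mode_* helpers come from the header above; the example_* wrappers are hypothetical.

#include "iwl-op-mode.h"

/* Hypothetical wrapper showing steps 1-3 of the documented life cycle:
 * pick the ops table (here the DVM implementation) and start the
 * op_mode; ->start() may sleep and registers with mac80211 itself. */
static struct iwl_op_mode *example_op_mode_up(struct iwl_trans *trans,
					      const struct iwl_fw *fw)
{
	return iwl_dvm_ops.start(trans, fw);
}

/* Hypothetical wrapper for step 5: the driver layer stops the op_mode;
 * iwl_op_mode_stop() asserts a sleepable context and ->stop() frees all
 * memory the op_mode allocated. */
static void example_op_mode_down(struct iwl_op_mode *op_mode)
{
	iwl_op_mode_stop(op_mode);
}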
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index fb30ea7ca96b..c5e339ee918b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -64,118 +64,13 @@
#include <linux/pci.h>
#include <linux/pci-aspm.h>
-#include "iwl-bus.h"
#include "iwl-io.h"
#include "iwl-shared.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-cfg.h"
-
-/* PCI registers */
-#define PCI_CFG_RETRY_TIMEOUT 0x041
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
-
-struct iwl_pci_bus {
- /* basic pci-network driver stuff */
- struct pci_dev *pci_dev;
-
- /* pci hardware address support */
- void __iomem *hw_base;
-};
-
-#define IWL_BUS_GET_PCI_BUS(_iwl_bus) \
- ((struct iwl_pci_bus *) ((_iwl_bus)->bus_specific))
-
-#define IWL_BUS_GET_PCI_DEV(_iwl_bus) \
- ((IWL_BUS_GET_PCI_BUS(_iwl_bus))->pci_dev)
-
-static u16 iwl_pciexp_link_ctrl(struct iwl_bus *bus)
-{
- int pos;
- u16 pci_lnk_ctl;
-
- struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
-
- pos = pci_pcie_cap(pci_dev);
- pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
- return pci_lnk_ctl;
-}
-
-static bool iwl_pci_is_pm_supported(struct iwl_bus *bus)
-{
- u16 lctl = iwl_pciexp_link_ctrl(bus);
-
- return !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
-}
-
-static void iwl_pci_apm_config(struct iwl_bus *bus)
-{
- /*
- * HW bug W/A for instability in PCIe bus L0S->L1 transition.
- * Check if BIOS (or OS) enabled L1-ASPM on this device.
- * If so (likely), disable L0S, so device moves directly L0->L1;
- * costs negligible amount of power savings.
- * If not (unlikely), enable L0S, so there is at least some
- * power savings, even without L1.
- */
- u16 lctl = iwl_pciexp_link_ctrl(bus);
-
- if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
- PCI_CFG_LINK_CTRL_VAL_L1_EN) {
- /* L1-ASPM enabled; disable(!) L0S */
- iwl_set_bit(bus, CSR_GIO_REG,
- CSR_GIO_REG_VAL_L0S_ENABLED);
- dev_printk(KERN_INFO, bus->dev, "L1 Enabled; Disabling L0S\n");
- } else {
- /* L1-ASPM disabled; enable(!) L0S */
- iwl_clear_bit(bus, CSR_GIO_REG,
- CSR_GIO_REG_VAL_L0S_ENABLED);
- dev_printk(KERN_INFO, bus->dev, "L1 Disabled; Enabling L0S\n");
- }
-}
-
-static void iwl_pci_get_hw_id_string(struct iwl_bus *bus, char buf[],
- int buf_len)
-{
- struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
-
- snprintf(buf, buf_len, "PCI ID: 0x%04X:0x%04X", pci_dev->device,
- pci_dev->subsystem_device);
-}
-
-static u32 iwl_pci_get_hw_id(struct iwl_bus *bus)
-{
- struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
-
- return (pci_dev->device << 16) + pci_dev->subsystem_device;
-}
-
-static void iwl_pci_write8(struct iwl_bus *bus, u32 ofs, u8 val)
-{
- iowrite8(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
-}
-
-static void iwl_pci_write32(struct iwl_bus *bus, u32 ofs, u32 val)
-{
- iowrite32(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
-}
-
-static u32 iwl_pci_read32(struct iwl_bus *bus, u32 ofs)
-{
- u32 val = ioread32(IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
- return val;
-}
-
-static const struct iwl_bus_ops bus_ops_pci = {
- .get_pm_support = iwl_pci_is_pm_supported,
- .apm_config = iwl_pci_apm_config,
- .get_hw_id_string = iwl_pci_get_hw_id_string,
- .get_hw_id = iwl_pci_get_hw_id,
- .write8 = iwl_pci_write8,
- .write32 = iwl_pci_write32,
- .read32 = iwl_pci_read32,
-};
+#include "iwl-drv.h"
+#include "iwl-trans.h"
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
@@ -263,9 +158,9 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
{IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1341, iwl6005_2agn_d_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_cfg)},/* low 5GHz active */
- {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_cfg)},/* high 5GHz active */
+ {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
+ {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
/* 6x30 Series */
{IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
@@ -346,6 +241,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
/* 105 Series */
{IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
@@ -362,132 +258,62 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
- struct iwl_bus *bus;
- struct iwl_pci_bus *pci_bus;
- u16 pci_cmd;
+ const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+ struct iwl_shared *shrd;
+ struct iwl_trans *iwl_trans;
int err;
- bus = kzalloc(sizeof(*bus) + sizeof(*pci_bus), GFP_KERNEL);
- if (!bus) {
+ shrd = kzalloc(sizeof(*iwl_trans->shrd), GFP_KERNEL);
+ if (!shrd) {
dev_printk(KERN_ERR, &pdev->dev,
- "Couldn't allocate iwl_pci_bus");
+ "Couldn't allocate iwl_shared");
err = -ENOMEM;
- goto out_no_pci;
+ goto out_free_bus;
}
- pci_bus = IWL_BUS_GET_PCI_BUS(bus);
- pci_bus->pci_dev = pdev;
-
- pci_set_drvdata(pdev, bus);
-
- /* W/A - seems to solve weird behavior. We need to remove this if we
- * don't want to stay in L1 all the time. This wastes a lot of power */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
- if (pci_enable_device(pdev)) {
- err = -ENODEV;
- goto out_no_pci;
- }
-
- pci_set_master(pdev);
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
- if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
- /* both attempts failed: */
- if (err) {
- dev_printk(KERN_ERR, bus->dev,
- "No suitable DMA available.\n");
- goto out_pci_disable_device;
- }
- }
-
- err = pci_request_regions(pdev, DRV_NAME);
- if (err) {
- dev_printk(KERN_ERR, bus->dev, "pci_request_regions failed");
- goto out_pci_disable_device;
- }
-
- pci_bus->hw_base = pci_iomap(pdev, 0, 0);
- if (!pci_bus->hw_base) {
- dev_printk(KERN_ERR, bus->dev, "pci_iomap failed");
- err = -ENODEV;
- goto out_pci_release_regions;
+#ifdef CONFIG_IWLWIFI_IDI
+ iwl_trans = iwl_trans_idi_alloc(shrd, pdev, ent);
+#else
+ iwl_trans = iwl_trans_pcie_alloc(shrd, pdev, ent);
+#endif
+ if (iwl_trans == NULL) {
+ err = -ENOMEM;
+ goto out_free_bus;
}
- dev_printk(KERN_INFO, &pdev->dev,
- "pci_resource_len = 0x%08llx\n",
- (unsigned long long) pci_resource_len(pdev, 0));
- dev_printk(KERN_INFO, &pdev->dev,
- "pci_resource_base = %p\n", pci_bus->hw_base);
+ shrd->trans = iwl_trans;
+ pci_set_drvdata(pdev, iwl_trans);
- dev_printk(KERN_INFO, &pdev->dev,
- "HW Revision ID = 0x%X\n", pdev->revision);
-
- /* We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state */
- pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
-
- err = pci_enable_msi(pdev);
+ err = iwl_drv_start(shrd, iwl_trans, cfg);
if (err)
- dev_printk(KERN_ERR, &pdev->dev,
- "pci_enable_msi failed(0X%x)", err);
-
- /* TODO: Move this away, not needed if not MSI */
- /* enable rfkill interrupt: hw bug w/a */
- pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
- if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
- pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
- }
-
- bus->dev = &pdev->dev;
- bus->irq = pdev->irq;
- bus->ops = &bus_ops_pci;
+ goto out_free_trans;
- err = iwl_probe(bus, &trans_ops_pcie, cfg);
- if (err)
- goto out_disable_msi;
return 0;
-out_disable_msi:
- pci_disable_msi(pdev);
- pci_iounmap(pdev, pci_bus->hw_base);
-out_pci_release_regions:
+out_free_trans:
+ iwl_trans_free(iwl_trans);
pci_set_drvdata(pdev, NULL);
- pci_release_regions(pdev);
-out_pci_disable_device:
- pci_disable_device(pdev);
-out_no_pci:
- kfree(bus);
+out_free_bus:
+ kfree(shrd);
return err;
}
static void __devexit iwl_pci_remove(struct pci_dev *pdev)
{
- struct iwl_bus *bus = pci_get_drvdata(pdev);
- struct iwl_pci_bus *pci_bus = IWL_BUS_GET_PCI_BUS(bus);
- struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
- struct iwl_shared *shrd = bus->shrd;
+ struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
+ struct iwl_shared *shrd = iwl_trans->shrd;
- iwl_remove(shrd->priv);
+ iwl_drv_stop(shrd);
+ iwl_trans_free(shrd->trans);
- pci_disable_msi(pci_dev);
- pci_iounmap(pci_dev, pci_bus->hw_base);
- pci_release_regions(pci_dev);
- pci_disable_device(pci_dev);
- pci_set_drvdata(pci_dev, NULL);
+ pci_set_drvdata(pdev, NULL);
- kfree(bus);
+ kfree(shrd);
}
#ifdef CONFIG_PM_SLEEP
@@ -495,22 +321,20 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
static int iwl_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_bus *bus = pci_get_drvdata(pdev);
- struct iwl_shared *shrd = bus->shrd;
+ struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
* WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
*/
- return iwl_trans_suspend(shrd->trans);
+ return iwl_trans_suspend(iwl_trans);
}
static int iwl_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_bus *bus = pci_get_drvdata(pdev);
- struct iwl_shared *shrd = bus->shrd;
+ struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
@@ -523,7 +347,7 @@ static int iwl_pci_resume(struct device *device)
*/
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
- return iwl_trans_resume(shrd->trans);
+ return iwl_trans_resume(iwl_trans);
}
static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
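Condensed, the new probe path above amounts to: allocate the shared area, let the transport allocate itself from the pci_dev (PCIe, or IDI when CONFIG_IWLWIFI_IDI is set), publish the transport as the PCI drvdata, then hand everything to the driver layer. The following sketch mirrors the hunk above with error unwinding trimmed; it is illustrative only, not a drop-in replacement.

static int example_probe_flow(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
	struct iwl_shared *shrd;
	struct iwl_trans *trans;
	int err;

	shrd = kzalloc(sizeof(*shrd), GFP_KERNEL);
	if (!shrd)
		return -ENOMEM;

	/* the transport owns the PCI/DMA setup that used to live here */
	trans = iwl_trans_pcie_alloc(shrd, pdev, ent);
	if (!trans) {
		kfree(shrd);
		return -ENOMEM;
	}

	shrd->trans = trans;
	/* remove/suspend/resume now read the transport straight from drvdata */
	pci_set_drvdata(pdev, trans);

	err = iwl_drv_start(shrd, trans, cfg);
	if (err) {
		iwl_trans_free(trans);
		pci_set_drvdata(pdev, NULL);
		kfree(shrd);
	}
	return err;
}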
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 2b188a6025b3..958d9d09aee3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -215,7 +215,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
else
cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
- if (hw_params(priv).shadow_reg_enable)
+ if (cfg(priv)->base_params->shadow_reg_enable)
cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
else
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
@@ -301,7 +301,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
if (priv->power_data.bus_pm)
cmd->flags |= IWL_POWER_PCI_PM_MSK;
- if (hw_params(priv).shadow_reg_enable)
+ if (cfg(priv)->base_params->shadow_reg_enable)
cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
else
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
@@ -336,7 +336,7 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
le32_to_cpu(cmd->sleep_interval[3]),
le32_to_cpu(cmd->sleep_interval[4]));
- return iwl_trans_send_cmd_pdu(trans(priv), POWER_TABLE_CMD, CMD_SYNC,
+ return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, CMD_SYNC,
sizeof(struct iwl_powertable_cmd), cmd);
}
@@ -348,7 +348,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
dtimper = priv->hw->conf.ps_dtim_period ?: 1;
- if (priv->shrd->wowlan)
+ if (priv->wowlan)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
else if (!cfg(priv)->base_params->no_idle_support &&
priv->hw->conf.flags & IEEE80211_CONF_IDLE)
@@ -383,7 +383,7 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
int ret;
bool update_chains;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
/* Don't update the RX chain when chain noise calibration is running */
update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
@@ -392,12 +392,12 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
return 0;
- if (!iwl_is_ready_rf(priv->shrd))
+ if (!iwl_is_ready_rf(priv))
return -EIO;
/* scan complete use sleep_power_next, need to be updated */
memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
- if (test_bit(STATUS_SCANNING, &priv->shrd->status) && !force) {
+ if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
return 0;
}
@@ -436,7 +436,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
/* initialize to default */
void iwl_power_initialize(struct iwl_priv *priv)
{
- priv->power_data.bus_pm = bus_get_pm_support(bus(priv));
+ priv->power_data.bus_pm = trans(priv)->pm_support;
priv->power_data.debug_sleep_level_override = -1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index 5f7b720cf1a4..07a19fce5fdc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index bebdd828f324..75dc20bd965b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -216,10 +216,6 @@
#define SCD_TRANS_TBL_OFFSET_QUEUE(x) \
((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
-#define SCD_QUEUECHAIN_SEL_ALL(priv) \
- (((1<<hw_params(priv).max_txq_num) - 1) &\
- (~(1<<(priv)->shrd->cmd_queue)))
-
#define SCD_BASE (PRPH_BASE + 0xa02c00)
#define SCD_SRAM_BASE_ADDR (SCD_BASE + 0x0)
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index a6454726737e..902efe4bc898 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -57,39 +57,39 @@
static int iwl_send_scan_abort(struct iwl_priv *priv)
{
int ret;
- struct iwl_rx_packet *pkt;
struct iwl_host_cmd cmd = {
.id = REPLY_SCAN_ABORT_CMD,
.flags = CMD_SYNC | CMD_WANT_SKB,
};
+ __le32 *status;
/* Exit instantly with error when device is not ready
* to receive scan abort command or it does not perform
* hardware scan currently */
- if (!test_bit(STATUS_READY, &priv->shrd->status) ||
- !test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status) ||
- !test_bit(STATUS_SCAN_HW, &priv->shrd->status) ||
- test_bit(STATUS_FW_ERROR, &priv->shrd->status) ||
- test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (!test_bit(STATUS_READY, &priv->status) ||
+ !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
+ !test_bit(STATUS_SCAN_HW, &priv->status) ||
+ test_bit(STATUS_FW_ERROR, &priv->shrd->status))
return -EIO;
- ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ ret = iwl_dvm_send_cmd(priv, &cmd);
if (ret)
return ret;
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->u.status != CAN_ABORT_STATUS) {
+ status = (void *)cmd.resp_pkt->data;
+ if (*status != CAN_ABORT_STATUS) {
/* The scan abort will return 1 for success or
* 2 for "failure". A failure condition can be
* due to simply not being in an active scan which
* can occur if we send the scan abort before we
* the microcode has notified us that a scan is
* completed. */
- IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
+ IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n",
+ le32_to_cpu(*status));
ret = -EIO;
}
- iwl_free_pages(priv->shrd, cmd.reply_page);
+ iwl_free_resp(&cmd);
return ret;
}
@@ -116,20 +116,20 @@ static void iwl_process_scan_complete(struct iwl_priv *priv)
{
bool aborted;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
- if (!test_and_clear_bit(STATUS_SCAN_COMPLETE, &priv->shrd->status))
+ if (!test_and_clear_bit(STATUS_SCAN_COMPLETE, &priv->status))
return;
IWL_DEBUG_SCAN(priv, "Completed scan.\n");
cancel_delayed_work(&priv->scan_check);
- aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->shrd->status);
+ aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
if (aborted)
IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
- if (!test_and_clear_bit(STATUS_SCANNING, &priv->shrd->status)) {
+ if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
goto out_settings;
}
@@ -165,7 +165,7 @@ out_complete:
out_settings:
/* Can we still talk to firmware ? */
- if (!iwl_is_ready_rf(priv->shrd))
+ if (!iwl_is_ready_rf(priv))
return;
iwlagn_post_scan(priv);
@@ -173,18 +173,18 @@ out_settings:
void iwl_force_scan_end(struct iwl_priv *priv)
{
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
- if (!test_bit(STATUS_SCANNING, &priv->shrd->status)) {
+ if (!test_bit(STATUS_SCANNING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
return;
}
IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
- clear_bit(STATUS_SCANNING, &priv->shrd->status);
- clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
- clear_bit(STATUS_SCAN_ABORTING, &priv->shrd->status);
- clear_bit(STATUS_SCAN_COMPLETE, &priv->shrd->status);
+ clear_bit(STATUS_SCANNING, &priv->status);
+ clear_bit(STATUS_SCAN_HW, &priv->status);
+ clear_bit(STATUS_SCAN_ABORTING, &priv->status);
+ clear_bit(STATUS_SCAN_COMPLETE, &priv->status);
iwl_complete_scan(priv, true);
}
@@ -192,14 +192,14 @@ static void iwl_do_scan_abort(struct iwl_priv *priv)
{
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
- if (!test_bit(STATUS_SCANNING, &priv->shrd->status)) {
+ if (!test_bit(STATUS_SCANNING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
return;
}
- if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->shrd->status)) {
+ if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
return;
}
@@ -218,7 +218,7 @@ static void iwl_do_scan_abort(struct iwl_priv *priv)
int iwl_scan_cancel(struct iwl_priv *priv)
{
IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
- queue_work(priv->shrd->workqueue, &priv->abort_scan);
+ queue_work(priv->workqueue, &priv->abort_scan);
return 0;
}
@@ -231,14 +231,14 @@ void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
{
unsigned long timeout = jiffies + msecs_to_jiffies(ms);
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
iwl_do_scan_abort(priv);
while (time_before_eq(jiffies, timeout)) {
- if (!test_bit(STATUS_SCAN_HW, &priv->shrd->status))
+ if (!test_bit(STATUS_SCAN_HW, &priv->status))
goto finished;
msleep(20);
}
@@ -261,13 +261,12 @@ void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
/* Service response to REPLY_SCAN_CMD (0x80) */
static int iwl_rx_reply_scan(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanreq_notification *notif =
- (struct iwl_scanreq_notification *)pkt->u.raw;
+ struct iwl_scanreq_notification *notif = (void *)pkt->data;
IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
#endif
@@ -276,12 +275,12 @@ static int iwl_rx_reply_scan(struct iwl_priv *priv,
/* Service SCAN_START_NOTIFICATION (0x82) */
static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanstart_notification *notif =
- (struct iwl_scanstart_notification *)pkt->u.raw;
+ struct iwl_scanstart_notification *notif = (void *)pkt->data;
+
priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
IWL_DEBUG_SCAN(priv, "Scan start: "
"%d [802.11%s] "
@@ -303,13 +302,12 @@ static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanresults_notification *notif =
- (struct iwl_scanresults_notification *)pkt->u.raw;
+ struct iwl_scanresults_notification *notif = (void *)pkt->data;
IWL_DEBUG_SCAN(priv, "Scan ch.res: "
"%d [802.11%s] "
@@ -329,11 +327,11 @@ static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
+ struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data;
IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
scan_notif->scanned_channels,
@@ -352,9 +350,9 @@ static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
* to clear, we need to set SCAN_COMPLETE before clearing SCAN_HW
* to avoid a race there.
*/
- set_bit(STATUS_SCAN_COMPLETE, &priv->shrd->status);
- clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
- queue_work(priv->shrd->workqueue, &priv->scan_completed);
+ set_bit(STATUS_SCAN_COMPLETE, &priv->status);
+ clear_bit(STATUS_SCAN_HW, &priv->status);
+ queue_work(priv->workqueue, &priv->scan_completed);
if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
iwl_advanced_bt_coexist(priv) &&
@@ -374,7 +372,7 @@ static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
IWL_BT_COEX_TRAFFIC_LOAD_NONE;
}
priv->bt_status = scan_notif->bt_status;
- queue_work(priv->shrd->workqueue,
+ queue_work(priv->workqueue,
&priv->bt_traffic_change_work);
}
return 0;
@@ -414,10 +412,25 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
for_each_context(priv, ctx) {
u16 value;
- if (!iwl_is_associated_ctx(ctx))
- continue;
- if (ctx->staging.dev_type == RXON_DEV_TYPE_P2P)
+ switch (ctx->staging.dev_type) {
+ case RXON_DEV_TYPE_P2P:
+ /* no timing constraints */
continue;
+ case RXON_DEV_TYPE_ESS:
+ default:
+ /* timing constraints if associated */
+ if (!iwl_is_associated_ctx(ctx))
+ continue;
+ break;
+ case RXON_DEV_TYPE_CP:
+ case RXON_DEV_TYPE_2STA:
+ /*
+ * These seem to always have timers for TBTT
+ * active in uCode even when not associated yet.
+ */
+ break;
+ }
+
value = ctx->beacon_int;
if (!value)
value = IWL_PASSIVE_DWELL_BASE;
@@ -559,6 +572,53 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
return added;
}
+/**
+ * iwl_fill_probe_req - fill in all required fields and IE for probe request
+ */
+
+static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
+ const u8 *ies, int ie_len, int left)
+{
+ int len = 0;
+ u8 *pos = NULL;
+
+ /* Make sure there is enough space for the probe request,
+ * two mandatory IEs and the data */
+ left -= 24;
+ if (left < 0)
+ return 0;
+
+ frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+ memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
+ memcpy(frame->sa, ta, ETH_ALEN);
+ memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
+ frame->seq_ctrl = 0;
+
+ len += 24;
+
+ /* ...next IE... */
+ pos = &frame->u.probe_req.variable[0];
+
+ /* fill in our indirect SSID IE */
+ left -= 2;
+ if (left < 0)
+ return 0;
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = 0;
+
+ len += 2;
+
+ if (WARN_ON(left < ie_len))
+ return len;
+
+ if (ies && ie_len) {
+ memcpy(pos, ies, ie_len);
+ len += ie_len;
+ }
+
+ return (u16)len;
+}
+
static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
struct iwl_host_cmd cmd = {
@@ -581,7 +641,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
u8 scan_tx_antennas = hw_params(priv).valid_tx_ant;
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
if (vif)
ctx = iwl_rxon_ctx_from_vif(vif);
@@ -778,7 +838,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->rx_chain = cpu_to_le16(rx_chain);
switch (priv->scan_type) {
case IWL_SCAN_NORMAL:
- cmd_len = iwl_fill_probe_req(priv,
+ cmd_len = iwl_fill_probe_req(
(struct ieee80211_mgmt *)scan->data,
vif->addr,
priv->scan_request->ie,
@@ -788,7 +848,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
case IWL_SCAN_RADIO_RESET:
case IWL_SCAN_ROC:
/* use bcast addr, will not be transmitted but must be valid */
- cmd_len = iwl_fill_probe_req(priv,
+ cmd_len = iwl_fill_probe_req(
(struct ieee80211_mgmt *)scan->data,
iwl_bcast_addr, NULL, 0,
IWL_MAX_SCAN_SIZE - sizeof(*scan));
@@ -867,15 +927,15 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->len = cpu_to_le16(cmd.len[0]);
/* set scan bit here for PAN params */
- set_bit(STATUS_SCAN_HW, &priv->shrd->status);
+ set_bit(STATUS_SCAN_HW, &priv->status);
ret = iwlagn_set_pan_params(priv);
if (ret)
return ret;
- ret = iwl_trans_send_cmd(trans(priv), &cmd);
+ ret = iwl_dvm_send_cmd(priv, &cmd);
if (ret) {
- clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
+ clear_bit(STATUS_SCAN_HW, &priv->status);
iwlagn_set_pan_params(priv);
}
@@ -898,22 +958,22 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
{
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
cancel_delayed_work(&priv->scan_check);
- if (!iwl_is_ready_rf(priv->shrd)) {
+ if (!iwl_is_ready_rf(priv)) {
IWL_WARN(priv, "Request scan called when driver not ready.\n");
return -EIO;
}
- if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
+ if (test_bit(STATUS_SCAN_HW, &priv->status)) {
IWL_DEBUG_SCAN(priv,
"Multiple concurrent scan requests in parallel.\n");
return -EBUSY;
}
- if (test_bit(STATUS_SCAN_ABORTING, &priv->shrd->status)) {
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
return -EBUSY;
}
@@ -923,19 +983,19 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
scan_type == IWL_SCAN_ROC ? "remain-on-channel " :
"internal short ");
- set_bit(STATUS_SCANNING, &priv->shrd->status);
+ set_bit(STATUS_SCANNING, &priv->status);
priv->scan_type = scan_type;
priv->scan_start = jiffies;
priv->scan_band = band;
ret = iwlagn_request_scan(priv, vif);
if (ret) {
- clear_bit(STATUS_SCANNING, &priv->shrd->status);
+ clear_bit(STATUS_SCANNING, &priv->status);
priv->scan_type = IWL_SCAN_NORMAL;
return ret;
}
- queue_delayed_work(priv->shrd->workqueue, &priv->scan_check,
+ queue_delayed_work(priv->workqueue, &priv->scan_check,
IWL_SCAN_CHECK_WATCHDOG);
return 0;
@@ -948,7 +1008,7 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
*/
void iwl_internal_short_hw_scan(struct iwl_priv *priv)
{
- queue_work(priv->shrd->workqueue, &priv->start_internal_scan);
+ queue_work(priv->workqueue, &priv->start_internal_scan);
}
static void iwl_bg_start_internal_scan(struct work_struct *work)
@@ -958,14 +1018,14 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
IWL_DEBUG_SCAN(priv, "Start internal scan\n");
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
if (priv->scan_type == IWL_SCAN_RADIO_RESET) {
IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
goto unlock;
}
- if (test_bit(STATUS_SCANNING, &priv->shrd->status)) {
+ if (test_bit(STATUS_SCANNING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
goto unlock;
}
@@ -973,7 +1033,7 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band))
IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
unlock:
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
static void iwl_bg_scan_check(struct work_struct *data)
@@ -986,56 +1046,9 @@ static void iwl_bg_scan_check(struct work_struct *data)
/* Since we are here firmware does not finish scan and
* most likely is in bad shape, so we don't bother to
* send abort command, just force scan complete to mac80211 */
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_force_scan_end(priv);
- mutex_unlock(&priv->shrd->mutex);
-}
-
-/**
- * iwl_fill_probe_req - fill in all required fields and IE for probe request
- */
-
-u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
- const u8 *ta, const u8 *ies, int ie_len, int left)
-{
- int len = 0;
- u8 *pos = NULL;
-
- /* Make sure there is enough space for the probe request,
- * two mandatory IEs and the data */
- left -= 24;
- if (left < 0)
- return 0;
-
- frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
- memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
- memcpy(frame->sa, ta, ETH_ALEN);
- memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
- frame->seq_ctrl = 0;
-
- len += 24;
-
- /* ...next IE... */
- pos = &frame->u.probe_req.variable[0];
-
- /* fill in our indirect SSID IE */
- left -= 2;
- if (left < 0)
- return 0;
- *pos++ = WLAN_EID_SSID;
- *pos++ = 0;
-
- len += 2;
-
- if (WARN_ON(left < ie_len))
- return len;
-
- if (ies && ie_len) {
- memcpy(pos, ies, ie_len);
- len += ie_len;
- }
-
- return (u16)len;
+ mutex_unlock(&priv->mutex);
}
static void iwl_bg_abort_scan(struct work_struct *work)
@@ -1046,9 +1059,9 @@ static void iwl_bg_abort_scan(struct work_struct *work)
/* We keep scan_check work queued in case when firmware will not
* report back scan completed notification */
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_scan_cancel_timeout(priv, 200);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
static void iwl_bg_scan_completed(struct work_struct *work)
@@ -1056,9 +1069,9 @@ static void iwl_bg_scan_completed(struct work_struct *work)
struct iwl_priv *priv =
container_of(work, struct iwl_priv, scan_completed);
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_process_scan_complete(priv);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
@@ -1076,8 +1089,8 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
cancel_work_sync(&priv->scan_completed);
if (cancel_delayed_work_sync(&priv->scan_check)) {
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
iwl_force_scan_end(priv);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
}
}
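iwl_fill_probe_req() now lives as a static helper above its callers; it writes a 24-byte management header (broadcast DA/BSSID, the given TA), a wildcard zero-length SSID IE, then the caller-supplied IEs, and returns the total length or 0 when the buffer is too small. A hypothetical call for the normal-scan case, modelled on the call sites patched above (the ie/ie_len and size arguments are assumptions for illustration):

/* Hypothetical: build the probe request that follows the fixed part of
 * the scan command; the remaining room is bounded by IWL_MAX_SCAN_SIZE. */
static u16 example_build_probe(struct iwl_priv *priv, struct ieee80211_vif *vif,
			       struct iwl_scan_cmd *scan)
{
	return iwl_fill_probe_req((struct ieee80211_mgmt *)scan->data,
				  vif->addr,		    /* TA = our address */
				  priv->scan_request->ie,   /* extra IEs, may be NULL */
				  priv->scan_request->ie_len,
				  IWL_MAX_SCAN_SIZE - sizeof(*scan));
}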
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
index dc55cc4a8108..b515d657a0ad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -65,12 +65,11 @@
#include <linux/types.h>
#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/gfp.h>
-#include <linux/mm.h> /* for page_address */
#include <net/mac80211.h>
#include "iwl-commands.h"
+#include "iwl-fw.h"
/**
* DOC: shared area - role and goal
@@ -94,7 +93,6 @@
* This implementation is iwl-pci.c
*/
-struct iwl_bus;
struct iwl_priv;
struct iwl_trans;
struct iwl_sensitivity_ranges;
@@ -102,7 +100,7 @@ struct iwl_trans_ops;
#define DRV_NAME "iwlwifi"
#define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2012 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
extern struct iwl_mod_params iwlagn_mod_params;
@@ -117,7 +115,6 @@ extern struct iwl_mod_params iwlagn_mod_params;
* Holds the module parameters
*
* @sw_crypto: using hardware encryption, default = 0
- * @num_of_queues: number of tx queue, HW dependent
* @disable_11n: disable 11n capabilities, default = 0,
* use IWL_DISABLE_HT_* constants
* @amsdu_size_8K: enable 8K amsdu size, default = 1
@@ -139,7 +136,6 @@ extern struct iwl_mod_params iwlagn_mod_params;
*/
struct iwl_mod_params {
int sw_crypto;
- int num_of_queues;
unsigned int disable_11n;
int amsdu_size_8K;
int antenna;
@@ -164,7 +160,6 @@ struct iwl_mod_params {
*
* Holds the module parameters
*
- * @max_txq_num: Max # Tx queues supported
* @num_ampdu_queues: num of ampdu queues
* @tx_chains_num: Number of TX chains
* @rx_chains_num: Number of RX chains
@@ -173,27 +168,23 @@ struct iwl_mod_params {
* @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX)
* @sku: sku read from EEPROM
* @rx_page_order: Rx buffer page order
- * @max_inst_size: for ucode use
- * @max_data_size: for ucode use
* @ct_kill_threshold: temperature threshold - in hw dependent unit
* @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit
* relevant for 1000, 6000 and up
* @wd_timeout: TX queues watchdog timeout
* @struct iwl_sensitivity_ranges: range of sensitivity values
+ * @use_rts_for_aggregation: use rts/cts protection for HT traffic
*/
struct iwl_hw_params {
- u8 max_txq_num;
u8 num_ampdu_queues;
u8 tx_chains_num;
u8 rx_chains_num;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 ht40_channel;
- bool shadow_reg_enable;
+ bool use_rts_for_aggregation;
u16 sku;
u32 rx_page_order;
- u32 max_inst_size;
- u32 max_data_size;
u32 ct_kill_threshold;
u32 ct_kill_exit_threshold;
unsigned int wd_timeout;
@@ -201,62 +192,6 @@ struct iwl_hw_params {
const struct iwl_sensitivity_ranges *sens;
};
-/**
- * enum iwl_ucode_type
- *
- * The type of ucode currently loaded on the hardware.
- *
- * @IWL_UCODE_NONE: No ucode loaded
- * @IWL_UCODE_REGULAR: Normal runtime ucode
- * @IWL_UCODE_INIT: Initial ucode
- * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
- */
-enum iwl_ucode_type {
- IWL_UCODE_NONE,
- IWL_UCODE_REGULAR,
- IWL_UCODE_INIT,
- IWL_UCODE_WOWLAN,
-};
-
-/**
- * struct iwl_notification_wait - notification wait entry
- * @list: list head for global list
- * @fn: function called with the notification
- * @cmd: command ID
- *
- * This structure is not used directly, to wait for a
- * notification declare it on the stack, and call
- * iwlagn_init_notification_wait() with appropriate
- * parameters. Then do whatever will cause the ucode
- * to notify the driver, and to wait for that then
- * call iwlagn_wait_notification().
- *
- * Each notification is one-shot. If at some point we
- * need to support multi-shot notifications (which
- * can't be allocated on the stack) we need to modify
- * the code for them.
- */
-struct iwl_notification_wait {
- struct list_head list;
-
- void (*fn)(struct iwl_trans *trans, struct iwl_rx_packet *pkt,
- void *data);
- void *fn_data;
-
- u8 cmd;
- bool triggered, aborted;
-};
-
-/**
- * enum iwl_pa_type - Power Amplifier type
- * @IWL_PA_SYSTEM: based on uCode configuration
- * @IWL_PA_INTERNAL: use Internal only
- */
-enum iwl_pa_type {
- IWL_PA_SYSTEM = 0,
- IWL_PA_INTERNAL = 1,
-};
-
/*
* LED mode
* IWL_LED_DEFAULT: use device default
@@ -264,11 +199,80 @@ enum iwl_pa_type {
* LED ON = RF ON
* LED OFF = RF OFF
* IWL_LED_BLINK: adjust led blink rate based on blink table
+ * IWL_LED_DISABLE: led disabled
*/
enum iwl_led_mode {
IWL_LED_DEFAULT,
IWL_LED_RF_STATE,
IWL_LED_BLINK,
+ IWL_LED_DISABLE,
+};
+
+/*
+ * @max_ll_items: max number of OTP blocks
+ * @shadow_ram_support: shadow support for OTP memory
+ * @led_compensation: compensate the led on/off time per HW according
+ * to the deviation, to achieve the desired led frequency.
+ * The detailed algorithm is described in iwl-led.c
+ * @chain_noise_num_beacons: number of beacons used to compute chain noise
+ * @adv_thermal_throttle: support advanced thermal throttling
+ * @support_ct_kill_exit: support ct kill exit condition
+ * @support_wimax_coexist: support wimax/wifi co-exist
+ * @plcp_delta_threshold: plcp error rate threshold used to trigger
+ * radio tuning when the received plcp error rate is too high
+ * @chain_noise_scale: default chain noise scale used for gain computation
+ * @wd_timeout: TX queues watchdog timeout
+ * @max_event_log_size: size of event log buffer size for ucode event logging
+ * @shadow_reg_enable: HW shadow register support
+ * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * @no_idle_support: do not support idle mode
+ * @wd_disable: disable watchdog timer
+ */
+struct iwl_base_params {
+ int eeprom_size;
+ int num_of_queues; /* def: HW dependent */
+ int num_of_ampdu_queues;/* def: HW dependent */
+ /* for iwl_apm_init() */
+ u32 pll_cfg_val;
+
+ const u16 max_ll_items;
+ const bool shadow_ram_support;
+ u16 led_compensation;
+ bool adv_thermal_throttle;
+ bool support_ct_kill_exit;
+ const bool support_wimax_coexist;
+ u8 plcp_delta_threshold;
+ s32 chain_noise_scale;
+ unsigned int wd_timeout;
+ u32 max_event_log_size;
+ const bool shadow_reg_enable;
+ const bool hd_v2;
+ const bool no_idle_support;
+ const bool wd_disable;
+};
+
+/*
+ * @advanced_bt_coexist: support advanced bt coexist
+ * @bt_init_traffic_load: specify initial bt traffic load
+ * @bt_prio_boost: default bt priority boost value
+ * @agg_time_limit: maximum number of uSec in aggregation
+ * @bt_sco_disable: uCode should not respond to BT in SCO/ESCO mode
+ */
+struct iwl_bt_params {
+ bool advanced_bt_coexist;
+ u8 bt_init_traffic_load;
+ u8 bt_prio_boost;
+ u16 agg_time_limit;
+ bool bt_sco_disable;
+ bool bt_session_2;
+};
+/*
+ * @use_rts_for_aggregation: use rts/cts protection for HT traffic
+ */
+struct iwl_ht_params {
+ const bool ht_greenfield_support; /* if used set to true */
+ bool use_rts_for_aggregation;
+ enum ieee80211_smps_mode smps_mode;
};
/**
@@ -281,9 +285,10 @@ enum iwl_led_mode {
* @ucode_api_ok: oldest version of the uCode API that is OK to load
* without a warning, for use in transitions
* @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @max_inst_size: The maximal length of the fw inst section
+ * @max_data_size: The maximal length of the fw data section
* @valid_tx_ant: valid transmit antenna
* @valid_rx_ant: valid receive antenna
- * @sku: sku information from EEPROM
* @eeprom_ver: EEPROM version
* @eeprom_calib_ver: EEPROM calibration version
* @lib: pointer to the lib ops
@@ -291,7 +296,6 @@ enum iwl_led_mode {
* @base_params: pointer to basic parameters
* @ht_params: point to ht patameters
* @bt_params: pointer to bt parameters
- * @pa_type: used by 6000 series only to identify the type of Power Amplifier
* @need_temp_offset_calib: need to perform temperature offset calibration
* @no_xtal_calib: some devices do not need crystal calibration data,
* don't send it to those
@@ -318,19 +322,19 @@ struct iwl_cfg {
const unsigned int ucode_api_max;
const unsigned int ucode_api_ok;
const unsigned int ucode_api_min;
+ const u32 max_data_size;
+ const u32 max_inst_size;
u8 valid_tx_ant;
u8 valid_rx_ant;
- u16 sku;
u16 eeprom_ver;
u16 eeprom_calib_ver;
const struct iwl_lib_ops *lib;
void (*additional_nic_config)(struct iwl_priv *priv);
/* params not likely to change within a device family */
- struct iwl_base_params *base_params;
+ const struct iwl_base_params *base_params;
/* params likely to change within a device family */
- struct iwl_ht_params *ht_params;
- struct iwl_bt_params *bt_params;
- enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */
+ const struct iwl_ht_params *ht_params;
+ const struct iwl_bt_params *bt_params;
const bool need_temp_offset_calib; /* if used set to true */
const bool no_xtal_calib;
u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
@@ -345,10 +349,6 @@ struct iwl_cfg {
/**
* struct iwl_shared - shared fields for all the layers of the driver
*
- * @dbg_level_dev: dbg level set per device. Prevails on
- * iwlagn_mod_params.debug_level if set (!= 0)
- * @ucode_owner: IWL_OWNERSHIP_*
- * @cmd_queue: command queue number
* @status: STATUS_*
* @wowlan: are we running wowlan uCode
* @valid_contexts: microcode/device supports multiple contexts
@@ -356,45 +356,22 @@ struct iwl_cfg {
* @cfg: see struct iwl_cfg
* @priv: pointer to the upper layer data
* @trans: pointer to the transport layer data
+ * @nic: pointer to the nic data
* @hw_params: see struct iwl_hw_params
- * @workqueue: the workqueue used by all the layers of the driver
* @lock: protect general shared data
- * @sta_lock: protects the station table.
- * If lock and sta_lock are needed, lock must be acquired first.
- * @mutex:
- * @wait_command_queue: the wait_queue for SYNC host command nad uCode load
* @eeprom: pointer to the eeprom/OTP image
* @ucode_type: indicator of loaded ucode image
- * @notif_waits: things waiting for notification
- * @notif_wait_lock: lock protecting notification
- * @notif_waitq: head of notification wait queue
* @device_pointers: pointers to ucode event tables
*/
struct iwl_shared {
-#ifdef CONFIG_IWLWIFI_DEBUG
- u32 dbg_level_dev;
-#endif /* CONFIG_IWLWIFI_DEBUG */
-
-#define IWL_OWNERSHIP_DRIVER 0
-#define IWL_OWNERSHIP_TM 1
- u8 ucode_owner;
- u8 cmd_queue;
unsigned long status;
- bool wowlan;
u8 valid_contexts;
- struct iwl_bus *bus;
- struct iwl_cfg *cfg;
- struct iwl_priv *priv;
+ const struct iwl_cfg *cfg;
struct iwl_trans *trans;
+ void *drv;
struct iwl_hw_params hw_params;
-
- struct workqueue_struct *workqueue;
- spinlock_t lock;
- spinlock_t sta_lock;
- struct mutex mutex;
-
- wait_queue_head_t wait_command_queue;
+ const struct iwl_fw *fw;
/* eeprom -- this is in the card's little endian byte order */
u8 *eeprom;
@@ -402,11 +379,6 @@ struct iwl_shared {
/* ucode related variables */
enum iwl_ucode_type ucode_type;
- /* notification wait support */
- struct list_head notif_waits;
- spinlock_t notif_wait_lock;
- wait_queue_head_t notif_waitq;
-
struct {
u32 error_event_table;
u32 log_event_table;
@@ -414,112 +386,14 @@ struct iwl_shared {
};
-/*Whatever _m is (iwl_trans, iwl_priv, iwl_bus, these macros will work */
-#define priv(_m) ((_m)->shrd->priv)
+/* Whatever _m is (iwl_trans, iwl_priv), these macros will work */
#define cfg(_m) ((_m)->shrd->cfg)
-#define bus(_m) ((_m)->shrd->bus)
#define trans(_m) ((_m)->shrd->trans)
#define hw_params(_m) ((_m)->shrd->hw_params)
-#ifdef CONFIG_IWLWIFI_DEBUG
-/*
- * iwl_get_debug_level: Return active debug level for device
- *
- * Using sysfs it is possible to set per device debug level. This debug
- * level will be used if set, otherwise the global debug level which can be
- * set via module parameter is used.
- */
-static inline u32 iwl_get_debug_level(struct iwl_shared *shrd)
+static inline bool iwl_have_debug_level(u32 level)
{
- if (shrd->dbg_level_dev)
- return shrd->dbg_level_dev;
- else
- return iwlagn_mod_params.debug_level;
-}
-#else
-static inline u32 iwl_get_debug_level(struct iwl_shared *shrd)
-{
- return iwlagn_mod_params.debug_level;
-}
-#endif
-
-static inline void iwl_free_pages(struct iwl_shared *shrd, unsigned long page)
-{
- free_pages(page, shrd->hw_params.rx_page_order);
-}
-
-/**
- * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_queue_inc_wrap(int index, int n_bd)
-{
- return ++index & (n_bd - 1);
-}
-
-/**
- * iwl_queue_dec_wrap - decrement queue index, wrap back to end
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_queue_dec_wrap(int index, int n_bd)
-{
- return --index & (n_bd - 1);
-}
-
-struct iwl_rx_mem_buffer {
- dma_addr_t page_dma;
- struct page *page;
- struct list_head list;
-};
-
-#define rxb_addr(r) page_address(r->page)
-
-/*
- * mac80211 queues, ACs, hardware queues, FIFOs.
- *
- * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
- *
- * Mac80211 uses the following numbers, which we get as from it
- * by way of skb_get_queue_mapping(skb):
- *
- * VO 0
- * VI 1
- * BE 2
- * BK 3
- *
- *
- * Regular (not A-MPDU) frames are put into hardware queues corresponding
- * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
- * own queue per aggregation session (RA/TID combination), such queues are
- * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
- * order to map frames to the right queue, we also need an AC->hw queue
- * mapping. This is implemented here.
- *
- * Due to the way hw queues are set up (by the hw specific modules like
- * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
- * mapping.
- */
-
-static const u8 tid_to_ac[] = {
- IEEE80211_AC_BE,
- IEEE80211_AC_BK,
- IEEE80211_AC_BK,
- IEEE80211_AC_BE,
- IEEE80211_AC_VI,
- IEEE80211_AC_VI,
- IEEE80211_AC_VO,
- IEEE80211_AC_VO
-};
-
-static inline int get_ac_from_tid(u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return tid_to_ac[tid];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
+ return iwlagn_mod_params.debug_level & level;
}
enum iwl_rxon_context_id {
@@ -529,64 +403,10 @@ enum iwl_rxon_context_id {
NUM_IWL_RXON_CTX
};
-int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
- struct iwl_cfg *cfg);
-void __devexit iwl_remove(struct iwl_priv * priv);
-struct iwl_device_cmd;
-int __must_check iwl_rx_dispatch(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
- struct iwl_device_cmd *cmd);
-
int iwlagn_hw_valid_rtc_data_addr(u32 addr);
-void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state);
-void iwl_nic_config(struct iwl_priv *priv);
-void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb);
-void iwl_apm_stop(struct iwl_priv *priv);
-int iwl_apm_init(struct iwl_priv *priv);
-void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand);
const char *get_cmd_string(u8 cmd);
-bool iwl_check_for_ct_kill(struct iwl_priv *priv);
-
-void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac);
-void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac);
-
-/* notification wait support */
-void iwl_abort_notification_waits(struct iwl_shared *shrd);
-void __acquires(wait_entry)
-iwl_init_notification_wait(struct iwl_shared *shrd,
- struct iwl_notification_wait *wait_entry,
- u8 cmd,
- void (*fn)(struct iwl_trans *trans,
- struct iwl_rx_packet *pkt,
- void *data),
- void *fn_data);
-int __must_check __releases(wait_entry)
-iwl_wait_notification(struct iwl_shared *shrd,
- struct iwl_notification_wait *wait_entry,
- unsigned long timeout);
-void __releases(wait_entry)
-iwl_remove_notification(struct iwl_shared *shrd,
- struct iwl_notification_wait *wait_entry);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-void iwl_reset_traffic_log(struct iwl_priv *priv);
-#endif /* CONFIG_IWLWIFI_DEBUGFS */
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-void iwl_print_rx_config_cmd(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctxid);
-#else
-static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctxid)
-{
-}
-#endif
#define IWL_CMD(x) case x: return #x
-#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
-
-#define IWL_TRAFFIC_ENTRIES (256)
-#define IWL_TRAFFIC_ENTRY_SIZE (64)
/*****************************************************
* DRIVER STATUS FUNCTIONS
@@ -612,46 +432,4 @@ static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
#define STATUS_CHANNEL_SWITCH_PENDING 19
#define STATUS_SCAN_COMPLETE 20
-static inline int iwl_is_ready(struct iwl_shared *shrd)
-{
- /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
- * set but EXIT_PENDING is not */
- return test_bit(STATUS_READY, &shrd->status) &&
- test_bit(STATUS_GEO_CONFIGURED, &shrd->status) &&
- !test_bit(STATUS_EXIT_PENDING, &shrd->status);
-}
-
-static inline int iwl_is_alive(struct iwl_shared *shrd)
-{
- return test_bit(STATUS_ALIVE, &shrd->status);
-}
-
-static inline int iwl_is_init(struct iwl_shared *shrd)
-{
- return test_bit(STATUS_INIT, &shrd->status);
-}
-
-static inline int iwl_is_rfkill_hw(struct iwl_shared *shrd)
-{
- return test_bit(STATUS_RF_KILL_HW, &shrd->status);
-}
-
-static inline int iwl_is_rfkill(struct iwl_shared *shrd)
-{
- return iwl_is_rfkill_hw(shrd);
-}
-
-static inline int iwl_is_ctkill(struct iwl_shared *shrd)
-{
- return test_bit(STATUS_CT_KILL, &shrd->status);
-}
-
-static inline int iwl_is_ready_rf(struct iwl_shared *shrd)
-{
- if (iwl_is_rfkill(shrd))
- return 0;
-
- return iwl_is_ready(shrd);
-}
-
#endif /* #__iwl_shared_h__ */
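With the parameter blocks above now const and only reachable through struct iwl_cfg, per-device tuning reads as cfg(priv)->base_params->... rather than coming from hw_params. A purely illustrative configuration using these structures (all values made up, no real device) might look like:

static const struct iwl_base_params example_base_params = {
	.num_of_queues = 20,		/* def: HW dependent */
	.shadow_reg_enable = true,
	.wd_timeout = 2000,
	.max_event_log_size = 512,
};

static const struct iwl_ht_params example_ht_params = {
	.ht_greenfield_support = true,
	.use_rts_for_aggregation = true,
};

static const struct iwl_cfg example_cfg = {
	/* device name, fw name prefix, uCode API range etc. omitted */
	.base_params = &example_base_params,
	.ht_params = &example_ht_params,
	/* .bt_params left NULL: no advanced BT coexistence */
};

/* consumers then go through the accessor macros defined above, e.g.
 *	if (cfg(priv)->base_params->shadow_reg_enable)
 *		cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
 */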
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
index 4a5cddd2d56b..76f7f9251436 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,7 +70,6 @@
#include <net/mac80211.h>
#include <net/netlink.h>
-#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-debug.h"
@@ -78,7 +77,15 @@
#include "iwl-agn.h"
#include "iwl-testmode.h"
#include "iwl-trans.h"
-#include "iwl-bus.h"
+#include "iwl-fh.h"
+#include "iwl-prph.h"
+
+
+/* Periphery registers absolute lower bound. This is used in order to
+ * differentiate register accesses through HBUS_TARG_PRPH_* from
+ * HBUS_TARG_MEM_* accesses.
+ */
+#define IWL_TM_ABS_PRPH_START (0xA00000)
/* The TLVs used in the gnl message policy between the kernel module and
* user space application. iwl_testmode_gnl_msg_policy is to be carried
@@ -109,19 +116,24 @@ struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
[IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
- [IWL_TM_ATTR_SRAM_ADDR] = { .type = NLA_U32, },
- [IWL_TM_ATTR_SRAM_SIZE] = { .type = NLA_U32, },
- [IWL_TM_ATTR_SRAM_DUMP] = { .type = NLA_UNSPEC, },
+ [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
[IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
+
+ [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
};
/*
* See the struct iwl_rx_packet in iwl-commands.h for the format of the
* received events from the device
*/
-static inline int get_event_length(struct iwl_rx_mem_buffer *rxb)
+static inline int get_event_length(struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (pkt)
@@ -152,7 +164,7 @@ static inline int get_event_length(struct iwl_rx_mem_buffer *rxb)
*/
static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+ struct iwl_rx_cmd_buffer *rxb)
{
struct ieee80211_hw *hw = priv->hw;
struct sk_buff *skb;
@@ -168,35 +180,36 @@ static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
skb = cfg80211_testmode_alloc_event_skb(hw->wiphy, 20 + length,
GFP_ATOMIC);
if (skb == NULL) {
- IWL_DEBUG_INFO(priv,
+ IWL_ERR(priv,
"Run out of memory for messages to user space ?\n");
return;
}
NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT);
- NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data);
+ /* the length doesn't include len_n_flags field, so add it manually */
+ NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data);
cfg80211_testmode_event(skb, GFP_ATOMIC);
return;
nla_put_failure:
kfree_skb(skb);
- IWL_DEBUG_INFO(priv, "Ouch, overran buffer, check allocation!\n");
+ IWL_ERR(priv, "Ouch, overran buffer, check allocation!\n");
}
void iwl_testmode_init(struct iwl_priv *priv)
{
- priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
+ priv->pre_rx_handler = NULL;
priv->testmode_trace.trace_enabled = false;
- priv->testmode_sram.sram_readed = false;
+ priv->testmode_mem.read_in_progress = false;
}
-static void iwl_sram_cleanup(struct iwl_priv *priv)
+static void iwl_mem_cleanup(struct iwl_priv *priv)
{
- if (priv->testmode_sram.sram_readed) {
- kfree(priv->testmode_sram.buff_addr);
- priv->testmode_sram.buff_addr = NULL;
- priv->testmode_sram.buff_size = 0;
- priv->testmode_sram.num_chunks = 0;
- priv->testmode_sram.sram_readed = false;
+ if (priv->testmode_mem.read_in_progress) {
+ kfree(priv->testmode_mem.buff_addr);
+ priv->testmode_mem.buff_addr = NULL;
+ priv->testmode_mem.buff_size = 0;
+ priv->testmode_mem.num_chunks = 0;
+ priv->testmode_mem.read_in_progress = false;
}
}
@@ -205,7 +218,7 @@ static void iwl_trace_cleanup(struct iwl_priv *priv)
if (priv->testmode_trace.trace_enabled) {
if (priv->testmode_trace.cpu_addr &&
priv->testmode_trace.dma_addr)
- dma_free_coherent(bus(priv)->dev,
+ dma_free_coherent(trans(priv)->dev,
priv->testmode_trace.total_size,
priv->testmode_trace.cpu_addr,
priv->testmode_trace.dma_addr);
@@ -222,9 +235,10 @@ static void iwl_trace_cleanup(struct iwl_priv *priv)
void iwl_testmode_cleanup(struct iwl_priv *priv)
{
iwl_trace_cleanup(priv);
- iwl_sram_cleanup(priv);
+ iwl_mem_cleanup(priv);
}
+
/*
* This function handles the user application commands to the ucode.
*
@@ -233,35 +247,80 @@ void iwl_testmode_cleanup(struct iwl_priv *priv)
* host command to the ucode.
*
* If any mandatory field is missing, -ENOMSG is replied to the user space
- * application; otherwise, the actual execution result of the host command to
- * ucode is replied.
+ * application; otherwise, waits for the host command to be sent and checks
+ * the return code. In case of error, it is returned; otherwise a reply is
+ * allocated and the reply RX packet is returned.
*
* @hw: ieee80211_hw object that represents the device
* @tb: gnl message fields from the user space
*/
static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_host_cmd cmd;
+ struct iwl_rx_packet *pkt;
+ struct sk_buff *skb;
+ void *reply_buf;
+ u32 reply_len;
+ int ret;
+ bool cmd_want_skb;
memset(&cmd, 0, sizeof(struct iwl_host_cmd));
if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
!tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
- IWL_DEBUG_INFO(priv,
- "Error finding ucode command mandatory fields\n");
+ IWL_ERR(priv, "Missing ucode command mandatory fields\n");
return -ENOMSG;
}
- cmd.flags = CMD_ON_DEMAND;
+ cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
+ cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
+ if (cmd_want_skb)
+ cmd.flags |= CMD_WANT_SKB;
+
cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
- IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
+ IWL_DEBUG_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
" len %d\n", cmd.id, cmd.flags, cmd.len[0]);
- /* ok, let's submit the command to ucode */
- return iwl_trans_send_cmd(trans(priv), &cmd);
+
+ ret = iwl_dvm_send_cmd(priv, &cmd);
+ if (ret) {
+ IWL_ERR(priv, "Failed to send hcmd\n");
+ return ret;
+ }
+ if (!cmd_want_skb)
+ return ret;
+
+ /* Handling return of SKB to the user */
+ pkt = cmd.resp_pkt;
+ if (!pkt) {
+ IWL_ERR(priv, "HCMD received a null response packet\n");
+ return ret;
+ }
+
+ reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, reply_len + 20);
+ reply_buf = kmalloc(reply_len, GFP_KERNEL);
+ if (!skb || !reply_buf) {
+ kfree_skb(skb);
+ kfree(reply_buf);
+ return -ENOMEM;
+ }
+
+ /* The reply is in a page that we cannot send to user space. */
+ memcpy(reply_buf, &(pkt->hdr), reply_len);
+ iwl_free_resp(&cmd);
+
+ NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT);
+ NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf);
+ return cfg80211_testmode_reply(skb);
+
+nla_put_failure:
+ IWL_DEBUG_INFO(priv, "Failed creating NL attributes\n");
+ return -ENOMSG;
}
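/*
 * Illustrative attribute layout (a sketch, not from the patch) that a
 * user-space caller would provide for the handler above; the names are
 * exactly the attributes parsed in iwl_testmode_ucode():
 *
 *	IWL_TM_ATTR_COMMAND        = IWL_TM_CMD_APP2DEV_UCODE
 *	IWL_TM_ATTR_UCODE_CMD_ID   = <host command id, u8>
 *	IWL_TM_ATTR_UCODE_CMD_DATA = <command payload, binary>
 *	IWL_TM_ATTR_UCODE_CMD_SKB  = <optional flag: return the reply packet>
 */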
@@ -284,84 +343,69 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
*/
static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
{
- struct iwl_priv *priv = hw->priv;
- u32 ofs, val32;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ u32 ofs, val32, cmd;
u8 val8;
struct sk_buff *skb;
int status = 0;
if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
- IWL_DEBUG_INFO(priv, "Error finding register offset\n");
+ IWL_ERR(priv, "Missing register offset\n");
return -ENOMSG;
}
ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
- switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
+ /* Allow access only to FH/CSR/HBUS in direct mode.
+ Since we don't have the upper bounds for the CSR and HBUS segments,
+ we will use only the upper bound of FH for sanity check. */
+ cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
+ if ((cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 ||
+ cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 ||
+ cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8) &&
+ (ofs >= FH_MEM_UPPER_BOUND)) {
+ IWL_ERR(priv, "offset out of segment (0x0 - 0x%x)\n",
+ FH_MEM_UPPER_BOUND);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
- val32 = iwl_read32(bus(priv), ofs);
+ val32 = iwl_read_direct32(trans(priv), ofs);
IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
if (!skb) {
- IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ IWL_ERR(priv, "Memory allocation fail\n");
return -ENOMEM;
}
NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
status = cfg80211_testmode_reply(skb);
if (status < 0)
- IWL_DEBUG_INFO(priv,
- "Error sending msg : %d\n", status);
+ IWL_ERR(priv, "Error sending msg : %d\n", status);
break;
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
- IWL_DEBUG_INFO(priv,
- "Error finding value to write\n");
+ IWL_ERR(priv, "Missing value to write\n");
return -ENOMSG;
} else {
val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
- iwl_write32(bus(priv), ofs, val32);
+ iwl_write_direct32(trans(priv), ofs, val32);
}
break;
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
- IWL_DEBUG_INFO(priv, "Error finding value to write\n");
+ IWL_ERR(priv, "Missing value to write\n");
return -ENOMSG;
} else {
val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
- iwl_write8(bus(priv), ofs, val8);
- }
- break;
- case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
- val32 = iwl_read_prph(bus(priv), ofs);
- IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
-
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
- if (!skb) {
- IWL_DEBUG_INFO(priv, "Error allocating memory\n");
- return -ENOMEM;
- }
- NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
- status = cfg80211_testmode_reply(skb);
- if (status < 0)
- IWL_DEBUG_INFO(priv,
- "Error sending msg : %d\n", status);
- break;
- case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
- if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
- IWL_DEBUG_INFO(priv,
- "Error finding value to write\n");
- return -ENOMSG;
- } else {
- val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
- IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
- iwl_write_prph(bus(priv), ofs, val32);
+ iwl_write8(trans(priv), ofs, val8);
}
break;
default:
- IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n");
+ IWL_ERR(priv, "Unknown testmode register command ID\n");
return -ENOSYS;
}
@@ -378,24 +422,23 @@ static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
struct iwl_notification_wait calib_wait;
int ret;
- iwl_init_notification_wait(priv->shrd, &calib_wait,
- CALIBRATION_COMPLETE_NOTIFICATION,
- NULL, NULL);
- ret = iwl_init_alive_start(trans(priv));
+ iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
+ CALIBRATION_COMPLETE_NOTIFICATION,
+ NULL, NULL);
+ ret = iwl_init_alive_start(priv);
if (ret) {
- IWL_DEBUG_INFO(priv,
- "Error configuring init calibration: %d\n", ret);
+ IWL_ERR(priv, "Fail init calibration: %d\n", ret);
goto cfg_init_calib_error;
}
- ret = iwl_wait_notification(priv->shrd, &calib_wait, 2 * HZ);
+ ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 2 * HZ);
if (ret)
- IWL_DEBUG_INFO(priv, "Error detecting"
+ IWL_ERR(priv, "Error detecting"
" CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
return ret;
cfg_init_calib_error:
- iwl_remove_notification(priv->shrd, &calib_wait);
+ iwl_remove_notification(&priv->notif_wait, &calib_wait);
return ret;
}
@@ -417,12 +460,13 @@ cfg_init_calib_error:
*/
static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_trans *trans = trans(priv);
struct sk_buff *skb;
unsigned char *rsp_data_ptr = NULL;
int status = 0, rsp_data_len = 0;
- u32 devid;
+ u32 devid, inst_size = 0, data_size = 0;
+ const struct fw_img *img;
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
@@ -431,8 +475,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
rsp_data_len + 20);
if (!skb) {
- IWL_DEBUG_INFO(priv,
- "Error allocating memory\n");
+ IWL_ERR(priv, "Memory allocation fail\n");
return -ENOMEM;
}
NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
@@ -441,47 +484,47 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
rsp_data_len, rsp_data_ptr);
status = cfg80211_testmode_reply(skb);
if (status < 0)
- IWL_DEBUG_INFO(priv, "Error sending msg : %d\n",
- status);
+ IWL_ERR(priv, "Error sending msg : %d\n", status);
break;
case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
- status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT);
+ status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
if (status)
- IWL_DEBUG_INFO(priv,
- "Error loading init ucode: %d\n", status);
+ IWL_ERR(priv, "Error loading init ucode: %d\n", status);
break;
case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
iwl_testmode_cfg_init_calib(priv);
+ priv->ucode_loaded = false;
iwl_trans_stop_device(trans);
break;
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
- status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_REGULAR);
+ status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
if (status) {
- IWL_DEBUG_INFO(priv,
+ IWL_ERR(priv,
"Error loading runtime ucode: %d\n", status);
break;
}
status = iwl_alive_start(priv);
if (status)
- IWL_DEBUG_INFO(priv,
+ IWL_ERR(priv,
"Error starting the device: %d\n", status);
break;
case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
iwl_scan_cancel_timeout(priv, 200);
+ priv->ucode_loaded = false;
iwl_trans_stop_device(trans);
- status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_WOWLAN);
+ status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
if (status) {
- IWL_DEBUG_INFO(priv,
+ IWL_ERR(priv,
"Error loading WOWLAN ucode: %d\n", status);
break;
}
status = iwl_alive_start(priv);
if (status)
- IWL_DEBUG_INFO(priv,
+ IWL_ERR(priv,
"Error starting the device: %d\n", status);
break;
@@ -490,8 +533,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
cfg(priv)->base_params->eeprom_size + 20);
if (!skb) {
- IWL_DEBUG_INFO(priv,
- "Error allocating memory\n");
+ IWL_ERR(priv, "Memory allocation fail\n");
return -ENOMEM;
}
NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
@@ -501,55 +543,75 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
priv->shrd->eeprom);
status = cfg80211_testmode_reply(skb);
if (status < 0)
- IWL_DEBUG_INFO(priv,
- "Error sending msg : %d\n",
- status);
+ IWL_ERR(priv, "Error sending msg : %d\n",
+ status);
} else
return -EFAULT;
break;
case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
if (!tb[IWL_TM_ATTR_FIXRATE]) {
- IWL_DEBUG_INFO(priv,
- "Error finding fixrate setting\n");
+ IWL_ERR(priv, "Missing fixrate setting\n");
return -ENOMSG;
}
priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
break;
case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
- IWL_INFO(priv, "uCode version raw: 0x%x\n", priv->ucode_ver);
+ IWL_INFO(priv, "uCode version raw: 0x%x\n",
+ priv->fw->ucode_ver);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
if (!skb) {
- IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ IWL_ERR(priv, "Memory allocation fail\n");
return -ENOMEM;
}
- NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION, priv->ucode_ver);
+ NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION,
+ priv->fw->ucode_ver);
status = cfg80211_testmode_reply(skb);
if (status < 0)
- IWL_DEBUG_INFO(priv,
- "Error sending msg : %d\n", status);
+ IWL_ERR(priv, "Error sending msg : %d\n", status);
break;
case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
- devid = bus_get_hw_id(bus(priv));
+ devid = trans(priv)->hw_id;
IWL_INFO(priv, "hw version: 0x%x\n", devid);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
if (!skb) {
- IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ IWL_ERR(priv, "Memory allocation fail\n");
return -ENOMEM;
}
NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid);
status = cfg80211_testmode_reply(skb);
if (status < 0)
- IWL_DEBUG_INFO(priv,
- "Error sending msg : %d\n", status);
+ IWL_ERR(priv, "Error sending msg : %d\n", status);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
+ if (!skb) {
+ IWL_ERR(priv, "Memory allocation fail\n");
+ return -ENOMEM;
+ }
+ if (!priv->ucode_loaded) {
+ IWL_ERR(priv, "No uCode has not been loaded\n");
+ kfree_skb(skb);
+ return -EINVAL;
+ } else {
+ img = &priv->fw->img[priv->shrd->ucode_type];
+ inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
+ data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
+ }
+ NLA_PUT_U32(skb, IWL_TM_ATTR_FW_TYPE, priv->shrd->ucode_type);
+ NLA_PUT_U32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size);
+ NLA_PUT_U32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size);
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_ERR(priv, "Error sending msg : %d\n", status);
break;
default:
- IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n");
+ IWL_ERR(priv, "Unknown testmode driver command ID\n");
return -ENOSYS;
}
return status;
@@ -574,10 +636,10 @@ nla_put_failure:
*/
static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct sk_buff *skb;
int status = 0;
- struct device *dev = bus(priv)->dev;
+ struct device *dev = trans(priv)->dev;
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
@@ -612,8 +674,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
sizeof(priv->testmode_trace.dma_addr) + 20);
if (!skb) {
- IWL_DEBUG_INFO(priv,
- "Error allocating memory\n");
+ IWL_ERR(priv, "Memory allocation fail\n");
iwl_trace_cleanup(priv);
return -ENOMEM;
}
@@ -622,9 +683,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
(u64 *)&priv->testmode_trace.dma_addr);
status = cfg80211_testmode_reply(skb);
if (status < 0) {
- IWL_DEBUG_INFO(priv,
- "Error sending msg : %d\n",
- status);
+ IWL_ERR(priv, "Error sending msg : %d\n", status);
}
priv->testmode_trace.num_chunks =
DIV_ROUND_UP(priv->testmode_trace.buff_size,
@@ -635,7 +694,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
iwl_trace_cleanup(priv);
break;
default:
- IWL_DEBUG_INFO(priv, "Unknown testmode mem command ID\n");
+ IWL_ERR(priv, "Unknown testmode mem command ID\n");
return -ENOSYS;
}
return status;
@@ -648,11 +707,11 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb,
+static int iwl_testmode_trace_dump(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct netlink_callback *cb)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int idx, length;
if (priv->testmode_trace.trace_enabled &&
@@ -696,24 +755,105 @@ static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb,
*/
static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
u8 owner;
if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
- IWL_DEBUG_INFO(priv, "Error finding ucode owner\n");
+ IWL_ERR(priv, "Missing ucode owner\n");
return -ENOMSG;
}
owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
- if ((owner == IWL_OWNERSHIP_DRIVER) || (owner == IWL_OWNERSHIP_TM))
- priv->shrd->ucode_owner = owner;
- else {
- IWL_DEBUG_INFO(priv, "Invalid owner\n");
+ if (owner == IWL_OWNERSHIP_DRIVER) {
+ priv->ucode_owner = owner;
+ priv->pre_rx_handler = NULL;
+ } else if (owner == IWL_OWNERSHIP_TM) {
+ priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
+ priv->ucode_owner = owner;
+ } else {
+ IWL_ERR(priv, "Invalid owner\n");
return -EINVAL;
}
return 0;
}
+static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size)
+{
+ struct iwl_trans *trans = trans(priv);
+ unsigned long flags;
+ int i;
+
+ if (size & 0x3)
+ return -EINVAL;
+ priv->testmode_mem.buff_size = size;
+ priv->testmode_mem.buff_addr =
+ kmalloc(priv->testmode_mem.buff_size, GFP_KERNEL);
+ if (priv->testmode_mem.buff_addr == NULL)
+ return -ENOMEM;
+
+ /* Hard-coded periphery absolute address */
+ if (IWL_TM_ABS_PRPH_START <= addr &&
+ addr < IWL_TM_ABS_PRPH_START + PRPH_END) {
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ iwl_grab_nic_access(trans);
+ iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
+ addr | (3 << 24));
+ for (i = 0; i < size; i += 4)
+ *(u32 *)(priv->testmode_mem.buff_addr + i) =
+ iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
+ iwl_release_nic_access(trans);
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
+ } else { /* target memory (SRAM) */
+ _iwl_read_targ_mem_words(trans, addr,
+ priv->testmode_mem.buff_addr,
+ priv->testmode_mem.buff_size / 4);
+ }
+
+ priv->testmode_mem.num_chunks =
+ DIV_ROUND_UP(priv->testmode_mem.buff_size, DUMP_CHUNK_SIZE);
+ priv->testmode_mem.read_in_progress = true;
+ return 0;
+
+}
+
+static int iwl_testmode_indirect_write(struct iwl_priv *priv, u32 addr,
+ u32 size, unsigned char *buf)
+{
+ struct iwl_trans *trans = trans(priv);
+ u32 val, i;
+ unsigned long flags;
+
+ if (IWL_TM_ABS_PRPH_START <= addr &&
+ addr < IWL_TM_ABS_PRPH_START + PRPH_END) {
+ /* Periphery writes can be 1-3 bytes long, or DWORDs */
+ if (size < 4) {
+ memcpy(&val, buf, size);
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ iwl_grab_nic_access(trans);
+ iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
+ (addr & 0x0000FFFF) |
+ ((size - 1) << 24));
+ iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
+ iwl_release_nic_access(trans);
+ /* needed after consecutive writes w/o read */
+ mmiowb();
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
+ } else {
+ if (size % 4)
+ return -EINVAL;
+ for (i = 0; i < size; i += 4)
+ iwl_write_prph(trans, addr+i,
+ *(u32 *)(buf+i));
+ }
+ } else if (iwlagn_hw_valid_rtc_data_addr(addr) ||
+ (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
+ addr < IWLAGN_RTC_INST_UPPER_BOUND)) {
+ _iwl_write_targ_mem_words(trans, addr, buf, size/4);
+ } else
+ return -EINVAL;
+ return 0;
+}
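/*
 * Short worked example (not from the patch) of the periphery write
 * encoding used above, assuming the (size - 1) << 24 field carries the
 * access width the same way the read path uses 3 << 24 for 4-byte reads:
 *
 *	addr = 0xA02000, size = 1 (single byte):
 *	HBUS_TARG_PRPH_WADDR <- (0xA02000 & 0x0000FFFF) | ((1 - 1) << 24)
 *	                      = 0x00002000
 *	HBUS_TARG_PRPH_WDAT  <- the byte to write
 *
 *	For DWORD-aligned sizes the data is written one word at a time
 *	through iwl_write_prph(), as in the else branch above.
 */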
+
/*
* This function handles the user application commands for SRAM data dump
*
@@ -730,83 +870,60 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
* @hw: ieee80211_hw object that represents the device
* @tb: gnl message fields from the user space
*/
-static int iwl_testmode_sram(struct ieee80211_hw *hw, struct nlattr **tb)
+static int iwl_testmode_indirect_mem(struct ieee80211_hw *hw,
+ struct nlattr **tb)
{
- struct iwl_priv *priv = hw->priv;
- u32 base, ofs, size, maxsize;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ u32 addr, size, cmd;
+ unsigned char *buf;
- if (priv->testmode_sram.sram_readed)
+ /* Both read and write should be blocked, for atomicity */
+ if (priv->testmode_mem.read_in_progress)
return -EBUSY;
- if (!tb[IWL_TM_ATTR_SRAM_ADDR]) {
- IWL_DEBUG_INFO(priv, "Error finding SRAM offset address\n");
+ cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
+ if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
+ IWL_ERR(priv, "Error finding memory offset address\n");
return -ENOMSG;
}
- ofs = nla_get_u32(tb[IWL_TM_ATTR_SRAM_ADDR]);
- if (!tb[IWL_TM_ATTR_SRAM_SIZE]) {
- IWL_DEBUG_INFO(priv, "Error finding size for SRAM reading\n");
+ addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
+ if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
+ IWL_ERR(priv, "Error finding size for memory reading\n");
return -ENOMSG;
}
- size = nla_get_u32(tb[IWL_TM_ATTR_SRAM_SIZE]);
- switch (priv->shrd->ucode_type) {
- case IWL_UCODE_REGULAR:
- maxsize = trans(priv)->ucode_rt.data.len;
- break;
- case IWL_UCODE_INIT:
- maxsize = trans(priv)->ucode_init.data.len;
- break;
- case IWL_UCODE_WOWLAN:
- maxsize = trans(priv)->ucode_wowlan.data.len;
- break;
- case IWL_UCODE_NONE:
- IWL_DEBUG_INFO(priv, "Error, uCode does not been loaded\n");
- return -ENOSYS;
- default:
- IWL_DEBUG_INFO(priv, "Error, unsupported uCode type\n");
- return -ENOSYS;
- }
- if ((ofs + size) > maxsize) {
- IWL_DEBUG_INFO(priv, "Invalid offset/size: out of range\n");
- return -EINVAL;
- }
- priv->testmode_sram.buff_size = (size / 4) * 4;
- priv->testmode_sram.buff_addr =
- kmalloc(priv->testmode_sram.buff_size, GFP_KERNEL);
- if (priv->testmode_sram.buff_addr == NULL) {
- IWL_DEBUG_INFO(priv, "Error allocating memory\n");
- return -ENOMEM;
+ size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
+
+ if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ)
+ return iwl_testmode_indirect_read(priv, addr, size);
+ else {
+ if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
+ return -EINVAL;
+ buf = (unsigned char *) nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
+ return iwl_testmode_indirect_write(priv, addr, size, buf);
}
- base = 0x800000;
- _iwl_read_targ_mem_words(bus(priv), base + ofs,
- priv->testmode_sram.buff_addr,
- priv->testmode_sram.buff_size / 4);
- priv->testmode_sram.num_chunks =
- DIV_ROUND_UP(priv->testmode_sram.buff_size, DUMP_CHUNK_SIZE);
- priv->testmode_sram.sram_readed = true;
- return 0;
}
-static int iwl_testmode_sram_dump(struct ieee80211_hw *hw, struct nlattr **tb,
- struct sk_buff *skb,
- struct netlink_callback *cb)
+static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
{
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int idx, length;
- if (priv->testmode_sram.sram_readed) {
+ if (priv->testmode_mem.read_in_progress) {
idx = cb->args[4];
- if (idx >= priv->testmode_sram.num_chunks) {
- iwl_sram_cleanup(priv);
+ if (idx >= priv->testmode_mem.num_chunks) {
+ iwl_mem_cleanup(priv);
return -ENOENT;
}
length = DUMP_CHUNK_SIZE;
- if (((idx + 1) == priv->testmode_sram.num_chunks) &&
- (priv->testmode_sram.buff_size % DUMP_CHUNK_SIZE))
- length = priv->testmode_sram.buff_size %
+ if (((idx + 1) == priv->testmode_mem.num_chunks) &&
+ (priv->testmode_mem.buff_size % DUMP_CHUNK_SIZE))
+ length = priv->testmode_mem.buff_size %
DUMP_CHUNK_SIZE;
- NLA_PUT(skb, IWL_TM_ATTR_SRAM_DUMP, length,
- priv->testmode_sram.buff_addr +
+ NLA_PUT(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
+ priv->testmode_mem.buff_addr +
(DUMP_CHUNK_SIZE * idx));
idx++;
cb->args[4] = idx;
@@ -818,6 +935,20 @@ static int iwl_testmode_sram_dump(struct ieee80211_hw *hw, struct nlattr **tb,
return -ENOBUFS;
}
+static int iwl_testmode_notifications(struct ieee80211_hw *hw,
+ struct nlattr **tb)
+{
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ bool enable;
+
+ enable = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
+ if (enable)
+ priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
+ else
+ priv->pre_rx_handler = NULL;
+ return 0;
+}
+
/* The testmode gnl message handler that takes the gnl message from the
* user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
@@ -841,24 +972,23 @@ static int iwl_testmode_sram_dump(struct ieee80211_hw *hw, struct nlattr **tb,
int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
{
struct nlattr *tb[IWL_TM_ATTR_MAX];
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int result;
result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
iwl_testmode_gnl_msg_policy);
if (result != 0) {
- IWL_DEBUG_INFO(priv,
- "Error parsing the gnl message : %d\n", result);
+ IWL_ERR(priv, "Error parsing the gnl message : %d\n", result);
return result;
}
/* IWL_TM_ATTR_COMMAND is absolutely mandatory */
if (!tb[IWL_TM_ATTR_COMMAND]) {
- IWL_DEBUG_INFO(priv, "Error finding testmode command type\n");
+ IWL_ERR(priv, "Missing testmode command type\n");
return -ENOMSG;
}
/* in case multiple accesses to the device happens */
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_UCODE:
@@ -868,8 +998,6 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
- case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
- case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
result = iwl_testmode_reg(hw, tb);
break;
@@ -882,6 +1010,7 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
+ case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
result = iwl_testmode_driver(hw, tb);
break;
@@ -898,18 +1027,26 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
result = iwl_testmode_ownership(hw, tb);
break;
- case IWL_TM_CMD_APP2DEV_READ_SRAM:
- IWL_DEBUG_INFO(priv, "testmode sram read cmd to driver\n");
- result = iwl_testmode_sram(hw, tb);
+ case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
+ case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
+ IWL_DEBUG_INFO(priv, "testmode indirect memory cmd "
+ "to driver\n");
+ result = iwl_testmode_indirect_mem(hw, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
+ IWL_DEBUG_INFO(priv, "testmode notifications cmd "
+ "to driver\n");
+ result = iwl_testmode_notifications(hw, tb);
break;
default:
- IWL_DEBUG_INFO(priv, "Unknown testmode command\n");
+ IWL_ERR(priv, "Unknown testmode command\n");
result = -ENOSYS;
break;
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
return result;
}
@@ -918,7 +1055,7 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
void *data, int len)
{
struct nlattr *tb[IWL_TM_ATTR_MAX];
- struct iwl_priv *priv = hw->priv;
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int result;
u32 cmd;
@@ -929,15 +1066,14 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
iwl_testmode_gnl_msg_policy);
if (result) {
- IWL_DEBUG_INFO(priv,
- "Error parsing the gnl message : %d\n", result);
+ IWL_ERR(priv,
+ "Error parsing the gnl message : %d\n", result);
return result;
}
/* IWL_TM_ATTR_COMMAND is absolutely mandatory */
if (!tb[IWL_TM_ATTR_COMMAND]) {
- IWL_DEBUG_INFO(priv,
- "Error finding testmode command type\n");
+ IWL_ERR(priv, "Missing testmode command type\n");
return -ENOMSG;
}
cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
@@ -945,21 +1081,21 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
}
/* in case multiple accesses to the device happens */
- mutex_lock(&priv->shrd->mutex);
+ mutex_lock(&priv->mutex);
switch (cmd) {
case IWL_TM_CMD_APP2DEV_READ_TRACE:
IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
- result = iwl_testmode_trace_dump(hw, tb, skb, cb);
+ result = iwl_testmode_trace_dump(hw, skb, cb);
break;
- case IWL_TM_CMD_APP2DEV_DUMP_SRAM:
+ case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n");
- result = iwl_testmode_sram_dump(hw, tb, skb, cb);
+ result = iwl_testmode_buffer_dump(hw, skb, cb);
break;
default:
result = -EINVAL;
break;
}
- mutex_unlock(&priv->shrd->mutex);
+ mutex_unlock(&priv->mutex);
return result;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index 26138f110340..6ba211b09426 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -99,7 +99,7 @@
* to user application
* @IWL_TM_CMD_DEV2APP_UCODE_RX_PKT:
* commands from kernel space to multicast the spontaneous messages
- * to user application
+ * to user application, or to carry the reply of a host command
* @IWL_TM_CMD_DEV2APP_EEPROM_RSP:
* commands from kernel space to carry the eeprom response
* to user application
@@ -109,18 +109,22 @@
* if application has the ownership, the only host command from
* testmode will deliver to uCode. Default owner is driver
*
- * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
- * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
- * commands from user applicaiton to indirectly access peripheral register
- *
- * @IWL_TM_CMD_APP2DEV_READ_SRAM:
- * @IWL_TM_CMD_APP2DEV_DUMP_SRAM:
- * commands from user applicaiton to read data in sram
- *
- * @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load Weak On Wireless LAN uCode image
+ * @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load Wake On Wireless LAN uCode image
* @IWL_TM_CMD_APP2DEV_GET_FW_VERSION: retrieve uCode version
* @IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: retrieve ID information in device
- *
+ * @IWL_TM_CMD_APP2DEV_GET_FW_INFO:
+ * retrieve information about the currently loaded uCode image
+ *
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
+ * Commands to read/write data from periphery or SRAM memory ranges.
+ * For reading, a READ command is sent from userspace and the data
+ * is returned when the user calls a DUMP command.
+ * For writing, only a WRITE command is used (an illustrative userspace
+ * sketch follows the enum below).
+ * @IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
+ * Command to enable/disable notifications (currently RX packets) from the
+ * driver to userspace.
*/
enum iwl_tm_cmd_t {
IWL_TM_CMD_APP2DEV_UCODE = 1,
@@ -140,14 +144,19 @@ enum iwl_tm_cmd_t {
IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15,
IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16,
IWL_TM_CMD_APP2DEV_OWNERSHIP = 17,
- IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32 = 18,
- IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32 = 19,
- IWL_TM_CMD_APP2DEV_READ_SRAM = 20,
- IWL_TM_CMD_APP2DEV_DUMP_SRAM = 21,
+ RESERVED_18 = 18,
+ RESERVED_19 = 19,
+ RESERVED_20 = 20,
+ RESERVED_21 = 21,
IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW = 22,
IWL_TM_CMD_APP2DEV_GET_FW_VERSION = 23,
IWL_TM_CMD_APP2DEV_GET_DEVICE_ID = 24,
- IWL_TM_CMD_MAX = 25,
+ IWL_TM_CMD_APP2DEV_GET_FW_INFO = 25,
+ IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ = 26,
+ IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP = 27,
+ IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE = 28,
+ IWL_TM_CMD_APP2DEV_NOTIFICATIONS = 29,
+ IWL_TM_CMD_MAX = 30,
};
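/*
 * Rough user-space sketch (not part of this header) of the READ -> DUMP
 * flow documented above, assuming the standard nl80211 testmode plumbing
 * (the IWL_TM_* attributes nested inside NL80211_ATTR_TESTDATA of an
 * NL80211_CMD_TESTMODE message built with libnl-genl); nl80211_id, ifindex,
 * addr and size are assumed to be resolved by the caller:
 *
 *	struct nl_msg *msg = nlmsg_alloc();
 *	struct nlattr *td;
 *
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_id, 0, 0,
 *		    NL80211_CMD_TESTMODE, 0);
 *	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
 *	td = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
 *	nla_put_u32(msg, IWL_TM_ATTR_COMMAND,
 *		    IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ);
 *	nla_put_u32(msg, IWL_TM_ATTR_MEM_ADDR, addr);
 *	nla_put_u32(msg, IWL_TM_ATTR_BUFFER_SIZE, size);
 *	nla_nest_end(msg, td);
 *
 *	After sending, a second NL80211_CMD_TESTMODE request carrying
 *	IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP (as a dump) returns the data
 *	in IWL_TM_ATTR_BUFFER_DUMP chunks.
 */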
/*
@@ -168,8 +177,6 @@ enum iwl_tm_cmd_t {
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE,
* The mandatory fields are :
* IWL_TM_ATTR_UCODE_CMD_ID for recognizable command ID;
- * IWL_TM_ATTR_COMMAND_FLAG for the flags of the commands;
- * The optional fields are:
* IWL_TM_ATTR_UCODE_CMD_DATA for the actual command payload
* to the ucode
*
@@ -218,16 +225,19 @@ enum iwl_tm_cmd_t {
* The mandatory fields are:
* IWL_TM_ATTR_UCODE_OWNER for the new owner
*
- * @IWL_TM_ATTR_SRAM_ADDR:
- * @IWL_TM_ATTR_SRAM_SIZE:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_READ_SRAM,
+ * @IWL_TM_ATTR_MEM_ADDR:
+ * @IWL_TM_ATTR_BUFFER_SIZE:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ
+ * or IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE.
* The mandatory fields are:
- * IWL_TM_ATTR_SRAM_ADDR for the address in sram
- * IWL_TM_ATTR_SRAM_SIZE for the buffer size of data reading
+ * IWL_TM_ATTR_MEM_ADDR for the address in SRAM/periphery to read/write
+ * IWL_TM_ATTR_BUFFER_SIZE for the buffer size of data to read/write.
*
- * @IWL_TM_ATTR_SRAM_DUMP:
- * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_DUMP_SRAM,
- * IWL_TM_ATTR_SRAM_DUMP for the data in sram
+ * @IWL_TM_ATTR_BUFFER_DUMP:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP,
+ * IWL_TM_ATTR_BUFFER_DUMP is used for the data that was read.
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE,
+ * this attribute contains the data to write.
*
* @IWL_TM_ATTR_FW_VERSION:
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_VERSION,
@@ -237,6 +247,23 @@ enum iwl_tm_cmd_t {
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_DEVICE_ID,
* IWL_TM_ATTR_DEVICE_ID for the device ID information
*
+ * @IWL_TM_ATTR_FW_TYPE:
+ * @IWL_TM_ATTR_FW_INST_SIZE:
+ * @IWL_TM_ATTR_FW_DATA_SIZE:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_INFO,
+ * The mandatory fields are:
+ * IWL_TM_ATTR_FW_TYPE for the uCode type (INIT/RUNTIME/...)
+ * IWL_TM_ATTR_FW_INST_SIZE for the size of instruction section
+ * IWL_TM_ATTR_FW_DATA_SIZE for the size of data section
+ *
+ * @IWL_TM_ATTR_UCODE_CMD_SKB:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE this flag
+ * indicates that the user wants to receive the response of the command
+ * in a reply SKB. If it's not present, the response is not returned.
+ * @IWL_TM_ATTR_ENABLE_NOTIFICATION:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_NOTIFICATIONS, this
+ * flag enables (if present) or disables (if not) the forwarding
+ * to userspace.
*/
enum iwl_tm_attr_t {
IWL_TM_ATTR_NOT_APPLICABLE = 0,
@@ -254,12 +281,17 @@ enum iwl_tm_attr_t {
IWL_TM_ATTR_TRACE_DUMP = 12,
IWL_TM_ATTR_FIXRATE = 13,
IWL_TM_ATTR_UCODE_OWNER = 14,
- IWL_TM_ATTR_SRAM_ADDR = 15,
- IWL_TM_ATTR_SRAM_SIZE = 16,
- IWL_TM_ATTR_SRAM_DUMP = 17,
+ IWL_TM_ATTR_MEM_ADDR = 15,
+ IWL_TM_ATTR_BUFFER_SIZE = 16,
+ IWL_TM_ATTR_BUFFER_DUMP = 17,
IWL_TM_ATTR_FW_VERSION = 18,
IWL_TM_ATTR_DEVICE_ID = 19,
- IWL_TM_ATTR_MAX = 20,
+ IWL_TM_ATTR_FW_TYPE = 20,
+ IWL_TM_ATTR_FW_INST_SIZE = 21,
+ IWL_TM_ATTR_FW_DATA_SIZE = 22,
+ IWL_TM_ATTR_UCODE_CMD_SKB = 23,
+ IWL_TM_ATTR_ENABLE_NOTIFICATION = 24,
+ IWL_TM_ATTR_MAX = 25,
};
/* uCode trace buffer */
@@ -271,4 +303,7 @@ enum iwl_tm_attr_t {
/* Maximum data size of each dump it packet */
#define DUMP_CHUNK_SIZE (PAGE_SIZE - 1024)
+/* Address offset of data segment in SRAM */
+#define SRAM_DATA_SEG_OFFSET 0x800000
+
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index f6debf91d7b5..1c2fe87bd7e2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -32,6 +32,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
+#include <linux/wait.h>
#include <linux/pci.h>
#include "iwl-fh.h"
@@ -40,6 +41,7 @@
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
+#include "iwl-op-mode.h"
struct iwl_tx_queue;
struct iwl_queue;
@@ -48,6 +50,12 @@ struct iwl_host_cmd;
/*This file includes the declaration that are internal to the
* trans_pcie layer */
+struct iwl_rx_mem_buffer {
+ dma_addr_t page_dma;
+ struct page *page;
+ struct list_head list;
+};
+
/**
* struct isr_statistics - interrupt statistics
*
@@ -108,6 +116,26 @@ struct iwl_dma_ptr {
size_t size;
};
+/**
+ * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
+ * @index -- current index
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_queue_inc_wrap(int index, int n_bd)
+{
+ return ++index & (n_bd - 1);
+}
+
+/**
+ * iwl_queue_dec_wrap - decrement queue index, wrap back to end
+ * @index -- current index
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_queue_dec_wrap(int index, int n_bd)
+{
+ return --index & (n_bd - 1);
+}
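/*
 * Brief worked example (a sketch, not from the patch) of why n_bd must be
 * a power of 2: the (n_bd - 1) mask only behaves like "modulo n_bd" for
 * power-of-2 sizes.
 *
 *	n_bd = 256:
 *	iwl_queue_inc_wrap(255, 256) == (255 + 1) & 255 == 0
 *	iwl_queue_dec_wrap(0, 256)   == (0 - 1) & 255   == 255
 */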
+
/*
* This queue number is required for proper operation
* because the ucode will stop/start the scheduler as
@@ -168,6 +196,7 @@ struct iwl_queue {
* @meta: array of meta data for each command/tx buffer
* @dma_addr_cmd: physical address of cmd/tx buffer array
* @txb: array of per-TFD driver data
+ * @lock: queue lock
* @time_stamp: time (in jiffies) of last read_ptr change
* @need_update: indicates need to update read/write index
* @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
@@ -186,6 +215,7 @@ struct iwl_tx_queue {
struct iwl_device_cmd **cmd;
struct iwl_cmd_meta *meta;
struct sk_buff **skbs;
+ spinlock_t lock;
unsigned long time_stamp;
u8 need_update;
u8 sched_retry;
@@ -201,6 +231,8 @@ struct iwl_tx_queue {
* @rxq: all the RX queue data
* @rx_replenish: work that will be called when buffers need to be allocated
* @trans: pointer to the generic transport area
+ * @irq: the irq number for the device
+ * @irq_requested: true when the irq has been requested
* @scd_base_addr: scheduler sram base address in SRAM
* @scd_bc_tbls: pointer to the byte count table of the scheduler
* @kw: keep warm address
@@ -211,6 +243,12 @@ struct iwl_tx_queue {
* @txq_ctx_active_msk: what queue is active
* queue_stopped: tracks what queue is stopped
* queue_stop_count: tracks what SW queue is stopped
+ * @pci_dev: basic pci-network driver stuff
+ * @hw_base: pci hardware address support
+ * @ucode_write_complete: indicates that the ucode has been copied.
+ * @ucode_write_waitq: wait queue for uCode load
+ * @status: transport specific status flags
+ * @cmd_queue: command queue number
*/
struct iwl_trans_pcie {
struct iwl_rx_queue rxq;
@@ -223,9 +261,12 @@ struct iwl_trans_pcie {
int ict_index;
u32 inta;
bool use_ict;
+ bool irq_requested;
struct tasklet_struct irq_tasklet;
struct isr_statistics isr_stats;
+ unsigned int irq;
+ spinlock_t irq_lock;
u32 inta_mask;
u32 scd_base_addr;
struct iwl_dma_ptr scd_bc_tbls;
@@ -241,6 +282,17 @@ struct iwl_trans_pcie {
#define IWL_MAX_HW_QUEUES 32
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
atomic_t queue_stop_count[4];
+
+ /* PCI bus related data */
+ struct pci_dev *pci_dev;
+ void __iomem *hw_base;
+
+ bool ucode_write_complete;
+ wait_queue_head_t ucode_write_waitq;
+ unsigned long status;
+ u8 cmd_queue;
+ u8 n_no_reclaim_cmds;
+ u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
};
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
@@ -258,7 +310,7 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
/*****************************************************
* ICT
******************************************************/
-int iwl_reset_ict(struct iwl_trans *trans);
+void iwl_reset_ict(struct iwl_trans *trans);
void iwl_disable_ict(struct iwl_trans *trans);
int iwl_alloc_isr_ict(struct iwl_trans *trans);
void iwl_free_isr_ict(struct iwl_trans *trans);
@@ -275,7 +327,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_tx_cmd_complete(struct iwl_trans *trans,
- struct iwl_rx_mem_buffer *rxb, int handler_status);
+ struct iwl_rx_cmd_buffer *rxb, int handler_status);
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
u16 byte_cnt);
@@ -308,26 +360,32 @@ void iwl_dump_csr(struct iwl_trans *trans);
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
- clear_bit(STATUS_INT_ENABLED, &trans->shrd->status);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
/* disable interrupts from uCode/NIC to host */
- iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
+ iwl_write32(trans, CSR_INT_MASK, 0x00000000);
/* acknowledge/clear/reset any interrupts still pending
* from uCode or flow handler (Rx/Tx DMA) */
- iwl_write32(bus(trans), CSR_INT, 0xffffffff);
- iwl_write32(bus(trans), CSR_FH_INT_STATUS, 0xffffffff);
+ iwl_write32(trans, CSR_INT, 0xffffffff);
+ iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
- set_bit(STATUS_INT_ENABLED, &trans->shrd->status);
- iwl_write32(bus(trans), CSR_INT_MASK, trans_pcie->inta_mask);
+ set_bit(STATUS_INT_ENABLED, &trans_pcie->status);
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+}
+
+static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
+{
+ IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
+ iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
}
/*
@@ -355,7 +413,7 @@ static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
}
static inline void iwl_wake_queue(struct iwl_trans *trans,
- struct iwl_tx_queue *txq, const char *msg)
+ struct iwl_tx_queue *txq)
{
u8 queue = txq->swq_id;
u8 ac = queue & 3;
@@ -365,20 +423,20 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) {
if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) {
- iwl_wake_sw_queue(priv(trans), ac);
- IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d. %s",
- hwq, ac, msg);
+ iwl_op_mode_queue_not_full(trans->op_mode, ac);
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d",
+ hwq, ac);
} else {
- IWL_DEBUG_TX_QUEUES(trans, "Don't wake hwq %d ac %d"
- " stop count %d. %s",
- hwq, ac, atomic_read(&trans_pcie->
- queue_stop_count[ac]), msg);
+ IWL_DEBUG_TX_QUEUES(trans,
+ "Don't wake hwq %d ac %d stop count %d",
+ hwq, ac,
+ atomic_read(&trans_pcie->queue_stop_count[ac]));
}
}
}
static inline void iwl_stop_queue(struct iwl_trans *trans,
- struct iwl_tx_queue *txq, const char *msg)
+ struct iwl_tx_queue *txq)
{
u8 queue = txq->swq_id;
u8 ac = queue & 3;
@@ -388,35 +446,23 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) {
if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) {
- iwl_stop_sw_queue(priv(trans), ac);
- IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d ac %d"
- " stop count %d. %s",
- hwq, ac, atomic_read(&trans_pcie->
- queue_stop_count[ac]), msg);
+ iwl_op_mode_queue_full(trans->op_mode, ac);
+ IWL_DEBUG_TX_QUEUES(trans,
+ "Stop hwq %d ac %d stop count %d",
+ hwq, ac,
+ atomic_read(&trans_pcie->queue_stop_count[ac]));
} else {
- IWL_DEBUG_TX_QUEUES(trans, "Don't stop hwq %d ac %d"
- " stop count %d. %s",
- hwq, ac, atomic_read(&trans_pcie->
- queue_stop_count[ac]), msg);
+ IWL_DEBUG_TX_QUEUES(trans,
+ "Don't stop hwq %d ac %d stop count %d",
+ hwq, ac,
+ atomic_read(&trans_pcie->queue_stop_count[ac]));
}
} else {
- IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped/ %s",
- hwq, msg);
+ IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped",
+ hwq);
}
}
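/*
 * Decoding sketch (not from the patch) for the swq_id packing the two
 * helpers above rely on: the low two bits carry the AC, the next five bits
 * the HW queue. For example, swq_id = 0x1e gives:
 *
 *	ac  = 0x1e & 3           = 2
 *	hwq = (0x1e >> 2) & 0x1f = 7
 */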
-#ifdef ieee80211_stop_queue
-#undef ieee80211_stop_queue
-#endif
-
-#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
-
-#ifdef ieee80211_wake_queue
-#undef ieee80211_wake_queue
-#endif
-
-#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
-
static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
int txq_id)
{
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
index 65d1f05007be..8b1a7988e176 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -30,10 +30,14 @@
#include <linux/wait.h>
#include <linux/gfp.h>
-/*TODO: Remove include to iwl-core.h*/
-#include "iwl-core.h"
+#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-trans-pcie-int.h"
+#include "iwl-op-mode.h"
+
+#ifdef CONFIG_IWLWIFI_IDI
+#include "iwl-amfh.h"
+#endif
/******************************************************************************
*
@@ -136,34 +140,34 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
if (q->need_update == 0)
goto exit_unlock;
- if (hw_params(trans).shadow_reg_enable) {
+ if (cfg(trans)->base_params->shadow_reg_enable) {
/* shadow register enabled */
/* Device expects a multiple of 8 */
q->write_actual = (q->write & ~0x7);
- iwl_write32(bus(trans), FH_RSCSR_CHNL0_WPTR, q->write_actual);
+ iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
} else {
/* If power-saving is in use, make sure device is awake */
if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
- reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);
+ reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
IWL_DEBUG_INFO(trans,
"Rx queue requesting wakeup,"
" GP1 = 0x%x\n", reg);
- iwl_set_bit(bus(trans), CSR_GP_CNTRL,
+ iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
goto exit_unlock;
}
q->write_actual = (q->write & ~0x7);
- iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_WPTR,
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
q->write_actual);
/* Else device is assumed to be awake */
} else {
/* Device expects a multiple of 8 */
q->write_actual = (q->write & ~0x7);
- iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_WPTR,
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
q->write_actual);
}
}
@@ -223,7 +227,7 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
/* If the pre-allocated buffer pool is dropping low, schedule to
* refill it */
if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(trans->shrd->workqueue, &trans_pcie->rx_replenish);
+ schedule_work(&trans_pcie->rx_replenish);
/* If we've added more space for the firmware to place data, tell it.
@@ -308,7 +312,7 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
BUG_ON(rxb->page);
rxb->page = page;
/* Get physical address of the RB */
- rxb->page_dma = dma_map_page(bus(trans)->dev, page, 0,
+ rxb->page_dma = dma_map_page(trans->dev, page, 0,
PAGE_SIZE << hw_params(trans).rx_page_order,
DMA_FROM_DEVICE);
/* dma address must be no more than 36 bits */
@@ -327,13 +331,14 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
void iwlagn_rx_replenish(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long flags;
iwlagn_rx_allocate(trans, GFP_KERNEL);
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
iwlagn_rx_queue_restock(trans);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
static void iwlagn_rx_replenish_now(struct iwl_trans *trans)
@@ -347,14 +352,108 @@ void iwl_bg_rx_replenish(struct work_struct *data)
{
struct iwl_trans_pcie *trans_pcie =
container_of(data, struct iwl_trans_pcie, rx_replenish);
- struct iwl_trans *trans = trans_pcie->trans;
- if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
+ iwlagn_rx_replenish(trans_pcie->trans);
+}
+
+static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_device_cmd *cmd;
+ unsigned long flags;
+ int len, err;
+ u16 sequence;
+ struct iwl_rx_cmd_buffer rxcb;
+ struct iwl_rx_packet *pkt;
+ bool reclaim;
+ int index, cmd_index;
+
+ if (WARN_ON(!rxb))
return;
- mutex_lock(&trans->shrd->mutex);
- iwlagn_rx_replenish(trans);
- mutex_unlock(&trans->shrd->mutex);
+ dma_unmap_page(trans->dev, rxb->page_dma,
+ PAGE_SIZE << hw_params(trans).rx_page_order,
+ DMA_FROM_DEVICE);
+
+ rxcb._page = rxb->page;
+ pkt = rxb_addr(&rxcb);
+
+ IWL_DEBUG_RX(trans, "%s, 0x%02x\n",
+ get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+
+
+ len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+ trace_iwlwifi_dev_rx(trans->dev, pkt, len);
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
+ if (reclaim) {
+ int i;
+
+ for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
+ if (trans_pcie->no_reclaim_cmds[i] == pkt->hdr.cmd) {
+ reclaim = false;
+ break;
+ }
+ }
+ }
+
+ sequence = le16_to_cpu(pkt->hdr.sequence);
+ index = SEQ_TO_INDEX(sequence);
+ cmd_index = get_cmd_index(&txq->q, index);
+
+ if (reclaim)
+ cmd = txq->cmd[cmd_index];
+ else
+ cmd = NULL;
+
+ err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
+
+ /*
+ * XXX: After here, we should always check rxcb._page
+ * against NULL before touching it or its virtual
+ * memory (pkt). Because some rx_handler might have
+ * already taken or freed the pages.
+ */
+
+ if (reclaim) {
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking
+ * iwl_trans_send_cmd()
+ * as we reclaim the driver command queue */
+ if (rxcb._page)
+ iwl_tx_cmd_complete(trans, &rxcb, err);
+ else
+ IWL_WARN(trans, "Claim null rxb?\n");
+ }
+
+ /* page was stolen from us */
+ if (rxcb._page == NULL)
+ rxb->page = NULL;
+
+ /* Reuse the page if possible. For notification packets and
+ * SKBs that fail to Rx correctly, add them back into the
+ * rx_free list for reuse later. */
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (rxb->page != NULL) {
+ rxb->page_dma =
+ dma_map_page(trans->dev, rxb->page, 0,
+ PAGE_SIZE << hw_params(trans).rx_page_order,
+ DMA_FROM_DEVICE);
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+ spin_unlock_irqrestore(&rxq->lock, flags);
}
/**
@@ -366,20 +465,12 @@ void iwl_bg_rx_replenish(struct work_struct *data)
*/
static void iwl_rx_handle(struct iwl_trans *trans)
{
- struct iwl_rx_mem_buffer *rxb;
- struct iwl_rx_packet *pkt;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
- struct iwl_device_cmd *cmd;
u32 r, i;
- int reclaim;
- unsigned long flags;
u8 fill_rx = 0;
u32 count = 8;
int total_empty;
- int index, cmd_index;
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
@@ -399,102 +490,14 @@ static void iwl_rx_handle(struct iwl_trans *trans)
fill_rx = 1;
while (i != r) {
- int len, err;
- u16 sequence;
+ struct iwl_rx_mem_buffer *rxb;
rxb = rxq->queue[i];
-
- /* If an RXB doesn't have a Rx queue slot associated with it,
- * then a bug has been introduced in the queue refilling
- * routines -- catch it here */
- if (WARN_ON(rxb == NULL)) {
- i = (i + 1) & RX_QUEUE_MASK;
- continue;
- }
-
rxq->queue[i] = NULL;
- dma_unmap_page(bus(trans)->dev, rxb->page_dma,
- PAGE_SIZE << hw_params(trans).rx_page_order,
- DMA_FROM_DEVICE);
- pkt = rxb_addr(rxb);
-
- IWL_DEBUG_RX(trans, "r = %d, i = %d, %s, 0x%02x\n", r,
- i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
-
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- len += sizeof(u32); /* account for status word */
- trace_iwlwifi_dev_rx(priv(trans), pkt, len);
-
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
- (pkt->hdr.cmd != REPLY_RX) &&
- (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
- (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
- (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
- (pkt->hdr.cmd != REPLY_TX);
-
- sequence = le16_to_cpu(pkt->hdr.sequence);
- index = SEQ_TO_INDEX(sequence);
- cmd_index = get_cmd_index(&txq->q, index);
-
- if (reclaim)
- cmd = txq->cmd[cmd_index];
- else
- cmd = NULL;
-
- /* warn if this is cmd response / notification and the uCode
- * didn't set the SEQ_RX_FRAME for a frame that is
- * uCode-originated
- * If you saw this code after the second half of 2012, then
- * please remove it
- */
- WARN(pkt->hdr.cmd != REPLY_TX && reclaim == false &&
- (!(pkt->hdr.sequence & SEQ_RX_FRAME)),
- "reclaim is false, SEQ_RX_FRAME unset: %s\n",
- get_cmd_string(pkt->hdr.cmd));
-
- err = iwl_rx_dispatch(priv(trans), rxb, cmd);
-
- /*
- * XXX: After here, we should always check rxb->page
- * against NULL before touching it or its virtual
- * memory (pkt). Because some rx_handler might have
- * already taken or freed the pages.
- */
-
- if (reclaim) {
- /* Invoke any callbacks, transfer the buffer to caller,
- * and fire off the (possibly) blocking
- * iwl_trans_send_cmd()
- * as we reclaim the driver command queue */
- if (rxb->page)
- iwl_tx_cmd_complete(trans, rxb, err);
- else
- IWL_WARN(trans, "Claim null rxb?\n");
- }
-
- /* Reuse the page if possible. For notification packets and
- * SKBs that fail to Rx correctly, add them back into the
- * rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
- if (rxb->page != NULL) {
- rxb->page_dma = dma_map_page(bus(trans)->dev, rxb->page,
- 0, PAGE_SIZE <<
- hw_params(trans).rx_page_order,
- DMA_FROM_DEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- } else
- list_add_tail(&rxb->list, &rxq->rx_used);
+ IWL_DEBUG_RX(trans, "rxbuf: r = %d, i = %d (%p)\n", rxb);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ iwl_rx_handle_rxbuf(trans, rxb);
i = (i + 1) & RX_QUEUE_MASK;
/* If there are a lot of unused frames,
@@ -590,17 +593,16 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
{
u32 base;
struct iwl_error_event_table table;
- struct iwl_priv *priv = priv(trans);
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
base = trans->shrd->device_pointers.error_event_table;
if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
if (!base)
- base = priv->init_errlog_ptr;
+ base = trans->shrd->fw->init_errlog_ptr;
} else {
if (!base)
- base = priv->inst_errlog_ptr;
+ base = trans->shrd->fw->inst_errlog_ptr;
}
if (!iwlagn_hw_valid_rtc_data_addr(base)) {
@@ -612,7 +614,7 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
return;
}
- iwl_read_targ_mem_words(bus(priv), base, &table, sizeof(table));
+ iwl_read_targ_mem_words(trans, base, &table, sizeof(table));
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
@@ -622,7 +624,7 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
trans_pcie->isr_stats.err_code = table.error_id;
- trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low,
+ trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
table.data1, table.data2, table.line,
table.blink1, table.blink2, table.ilink1,
table.ilink2, table.bcon_time, table.gp1,
@@ -670,12 +672,11 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
*/
static void iwl_irq_handle_error(struct iwl_trans *trans)
{
- struct iwl_priv *priv = priv(trans);
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
- if (cfg(priv)->internal_wimax_coex &&
- (!(iwl_read_prph(bus(trans), APMG_CLK_CTRL_REG) &
+ if (cfg(trans)->internal_wimax_coex &&
+ (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
APMS_CLK_VAL_MRB_FUNC_MODE) ||
- (iwl_read_prph(bus(trans), APMG_PS_CTRL_REG) &
+ (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
APMG_PS_CTRL_VAL_RESET_REQ))) {
/*
* Keep the restart process from trying to send host
@@ -683,24 +684,20 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
*/
clear_bit(STATUS_READY, &trans->shrd->status);
clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
- wake_up(&priv->shrd->wait_command_queue);
+ wake_up(&trans->wait_command_queue);
IWL_ERR(trans, "RF is used by WiMAX\n");
return;
}
IWL_ERR(trans, "Loaded firmware version: %s\n",
- priv->hw->wiphy->fw_version);
+ trans->shrd->fw->fw_version);
iwl_dump_nic_error_log(trans);
iwl_dump_csr(trans);
iwl_dump_fh(trans, NULL, false);
iwl_dump_nic_event_log(trans, false, NULL, false);
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS)
- iwl_print_rx_config_cmd(priv(trans), IWL_RXON_CTX_BSS);
-#endif
- iwlagn_fw_error(priv, false);
+ iwl_op_mode_nic_error(trans->op_mode);
}
#define EVENT_START_OFFSET (4 * sizeof(u32))
@@ -719,7 +716,6 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
u32 ptr; /* SRAM byte address of log data */
u32 ev, time, data; /* event log data */
unsigned long reg_flags;
- struct iwl_priv *priv = priv(trans);
if (num_events == 0)
return pos;
@@ -727,10 +723,10 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
base = trans->shrd->device_pointers.log_event_table;
if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
if (!base)
- base = priv->init_evtlog_ptr;
+ base = trans->shrd->fw->init_evtlog_ptr;
} else {
if (!base)
- base = priv->inst_evtlog_ptr;
+ base = trans->shrd->fw->inst_evtlog_ptr;
}
if (mode == 0)
@@ -741,18 +737,18 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
/* Make sure device is powered up for SRAM reads */
- spin_lock_irqsave(&bus(trans)->reg_lock, reg_flags);
- iwl_grab_nic_access(bus(trans));
+ spin_lock_irqsave(&trans->reg_lock, reg_flags);
+ if (unlikely(!iwl_grab_nic_access(trans)))
+ goto out_unlock;
/* Set starting address; reads will auto-increment */
- iwl_write32(bus(trans), HBUS_TARG_MEM_RADDR, ptr);
- rmb();
+ iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
/* "time" is actually "data" for mode 0 (no timestamp).
* place event id # at far right for easier visual parsing. */
for (i = 0; i < num_events; i++) {
- ev = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
- time = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
+ ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+ time = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
if (mode == 0) {
/* data, ev */
if (bufsz) {
@@ -760,13 +756,13 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
"EVT_LOG:0x%08x:%04u\n",
time, ev);
} else {
- trace_iwlwifi_dev_ucode_event(priv, 0,
+ trace_iwlwifi_dev_ucode_event(trans->dev, 0,
time, ev);
IWL_ERR(trans, "EVT_LOG:0x%08x:%04u\n",
time, ev);
}
} else {
- data = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
+ data = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
if (bufsz) {
pos += scnprintf(*buf + pos, bufsz - pos,
"EVT_LOGT:%010u:0x%08x:%04u\n",
@@ -774,15 +770,16 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
} else {
IWL_ERR(trans, "EVT_LOGT:%010u:0x%08x:%04u\n",
time, data, ev);
- trace_iwlwifi_dev_ucode_event(priv, time,
+ trace_iwlwifi_dev_ucode_event(trans->dev, time,
data, ev);
}
}
}
/* Allow device to power down */
- iwl_release_nic_access(bus(trans));
- spin_unlock_irqrestore(&bus(trans)->reg_lock, reg_flags);
+ iwl_release_nic_access(trans);
+out_unlock:
+ spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
return pos;
}
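The comments in iwl_print_event_log() describe the SRAM read protocol: one write of the start address to HBUS_TARG_MEM_RADDR, after which each read of HBUS_TARG_MEM_RDAT returns the next word. A minimal sketch of that access pattern, assuming the register helpers used above and a caller that already holds reg_lock and NIC access exactly as the function above does; example_dump_sram_words() is a hypothetical helper:

/* Sketch only: dump n_words of event-log SRAM starting at byte address ptr. */
static void example_dump_sram_words(struct iwl_trans *trans, u32 ptr, int n_words)
{
	int i;

	/* one address write; the device auto-increments on every data read */
	iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);

	for (i = 0; i < n_words; i++) {
		u32 word = iwl_read32(trans, HBUS_TARG_MEM_RDAT);

		IWL_ERR(trans, "SRAM word %d: 0x%08x\n", i, word);
	}
}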
@@ -836,17 +833,16 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
u32 logsize;
int pos = 0;
size_t bufsz = 0;
- struct iwl_priv *priv = priv(trans);
base = trans->shrd->device_pointers.log_event_table;
if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
- logsize = priv->init_evtlog_size;
+ logsize = trans->shrd->fw->init_evtlog_size;
if (!base)
- base = priv->init_evtlog_ptr;
+ base = trans->shrd->fw->init_evtlog_ptr;
} else {
- logsize = priv->inst_evtlog_size;
+ logsize = trans->shrd->fw->inst_evtlog_size;
if (!base)
- base = priv->inst_evtlog_ptr;
+ base = trans->shrd->fw->inst_evtlog_ptr;
}
if (!iwlagn_hw_valid_rtc_data_addr(base)) {
@@ -859,10 +855,10 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
}
/* event log header */
- capacity = iwl_read_targ_mem(bus(trans), base);
- mode = iwl_read_targ_mem(bus(trans), base + (1 * sizeof(u32)));
- num_wraps = iwl_read_targ_mem(bus(trans), base + (2 * sizeof(u32)));
- next_entry = iwl_read_targ_mem(bus(trans), base + (3 * sizeof(u32)));
+ capacity = iwl_read_targ_mem(trans, base);
+ mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32)));
+ num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32)));
+ next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32)));
if (capacity > logsize) {
IWL_ERR(trans, "Log capacity %d is bogus, limit to %d "
@@ -885,7 +881,7 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
}
#ifdef CONFIG_IWLWIFI_DEBUG
- if (!(iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) && !full_log)
+ if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#else
@@ -905,7 +901,7 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
if (!*buf)
return -ENOMEM;
}
- if ((iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) || full_log) {
+ if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) {
/*
* if uCode has wrapped back to top of log,
* start at the oldest entry,
@@ -945,7 +941,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
/* Ack/clear/reset pending uCode interrupts.
* Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
@@ -958,15 +954,15 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
* hardware bugs here by ACKing all the possible interrupts so that
* interrupt coalescing can still be achieved.
*/
- iwl_write32(bus(trans), CSR_INT,
+ iwl_write32(trans, CSR_INT,
trans_pcie->inta | ~trans_pcie->inta_mask);
inta = trans_pcie->inta;
#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(trans->shrd) & IWL_DL_ISR) {
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
/* just for debug */
- inta_mask = iwl_read32(bus(trans), CSR_INT_MASK);
+ inta_mask = iwl_read32(trans, CSR_INT_MASK);
IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n ",
inta, inta_mask);
}
@@ -975,7 +971,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
/* saved interrupt in inta variable now we can reset trans_pcie->inta */
trans_pcie->inta = 0;
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
/* Now service all interrupt bits discovered above. */
if (inta & CSR_INT_BIT_HW_ERR) {
@@ -993,7 +989,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
}
#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) {
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
/* NIC fires this, but we don't use it, redundant with WAKEUP */
if (inta & CSR_INT_BIT_SCD) {
IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
@@ -1013,30 +1009,16 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
/* HW RF KILL switch toggled */
if (inta & CSR_INT_BIT_RF_KILL) {
- int hw_rf_kill = 0;
- if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rf_kill = 1;
+ bool hw_rfkill;
+ hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
- hw_rf_kill ? "disable radio" : "enable radio");
+ hw_rfkill ? "disable radio" : "enable radio");
isr_stats->rfkill++;
- /* driver only loads ucode once setting the interface up.
- * the driver allows loading the ucode even if the radio
- * is killed. Hence update the killswitch state here. The
- * rfkill handler will care about restarting if needed.
- */
- if (!test_bit(STATUS_ALIVE, &trans->shrd->status)) {
- if (hw_rf_kill)
- set_bit(STATUS_RF_KILL_HW,
- &trans->shrd->status);
- else
- clear_bit(STATUS_RF_KILL_HW,
- &trans->shrd->status);
- iwl_set_hw_rfkill_state(priv(trans), hw_rf_kill);
- }
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
handled |= CSR_INT_BIT_RF_KILL;
}
@@ -1061,7 +1043,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
if (inta & CSR_INT_BIT_WAKEUP) {
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
- for (i = 0; i < hw_params(trans).max_txq_num; i++)
+ for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++)
iwl_txq_update_write_ptr(trans,
&trans_pcie->txq[i]);
@@ -1078,12 +1060,12 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
IWL_DEBUG_ISR(trans, "Rx interrupt\n");
if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
- iwl_write32(bus(trans), CSR_FH_INT_STATUS,
+ iwl_write32(trans, CSR_FH_INT_STATUS,
CSR_FH_INT_RX_MASK);
}
if (inta & CSR_INT_BIT_RX_PERIODIC) {
handled |= CSR_INT_BIT_RX_PERIODIC;
- iwl_write32(bus(trans),
+ iwl_write32(trans,
CSR_INT, CSR_INT_BIT_RX_PERIODIC);
}
/* Sending RX interrupt require many steps to be done in the
@@ -1098,10 +1080,13 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
*/
/* Disable periodic interrupt; we use it as just a one-shot. */
- iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
+ iwl_write8(trans, CSR_INT_PERIODIC_REG,
CSR_INT_PERIODIC_DIS);
+#ifdef CONFIG_IWLWIFI_IDI
+ iwl_amfh_rx_handler();
+#else
iwl_rx_handle(trans);
-
+#endif
/*
* Enable periodic interrupt in 8 msec only if we received
* real RX interrupt (instead of just periodic int), to catch
@@ -1110,7 +1095,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
* to extend the periodic interrupt; one-shot is enough.
*/
if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
- iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
+ iwl_write8(trans, CSR_INT_PERIODIC_REG,
CSR_INT_PERIODIC_ENA);
isr_stats->rx++;
@@ -1118,13 +1103,13 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
/* This "Tx" DMA channel is used only for loading uCode */
if (inta & CSR_INT_BIT_FH_TX) {
- iwl_write32(bus(trans), CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
+ iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
isr_stats->tx++;
handled |= CSR_INT_BIT_FH_TX;
/* Wake up uCode load routine, now that load is complete */
- trans->ucode_write_complete = 1;
- wake_up(&trans->shrd->wait_command_queue);
+ trans_pcie->ucode_write_complete = true;
+ wake_up(&trans_pcie->ucode_write_waitq);
}
if (inta & ~handled) {
@@ -1139,11 +1124,11 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
/* Re-enable all interrupts */
/* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status))
+ if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
iwl_enable_interrupts(trans);
/* Re-enable RF_KILL if it occurred */
else if (handled & CSR_INT_BIT_RF_KILL)
- iwl_enable_rfkill_int(priv(trans));
+ iwl_enable_rfkill_int(trans);
}
/******************************************************************************
@@ -1164,7 +1149,7 @@ void iwl_free_isr_ict(struct iwl_trans *trans)
IWL_TRANS_GET_PCIE_TRANS(trans);
if (trans_pcie->ict_tbl) {
- dma_free_coherent(bus(trans)->dev, ICT_SIZE,
+ dma_free_coherent(trans->dev, ICT_SIZE,
trans_pcie->ict_tbl,
trans_pcie->ict_tbl_dma);
trans_pcie->ict_tbl = NULL;
@@ -1184,7 +1169,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->ict_tbl =
- dma_alloc_coherent(bus(trans)->dev, ICT_SIZE,
+ dma_alloc_coherent(trans->dev, ICT_SIZE,
&trans_pcie->ict_tbl_dma,
GFP_KERNEL);
if (!trans_pcie->ict_tbl)
@@ -1213,7 +1198,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
/* Device is going up; inform it about using the ICT interrupt table,
* and tell the driver to start using ICT interrupts.
*/
-int iwl_reset_ict(struct iwl_trans *trans)
+void iwl_reset_ict(struct iwl_trans *trans)
{
u32 val;
unsigned long flags;
@@ -1221,9 +1206,9 @@ int iwl_reset_ict(struct iwl_trans *trans)
IWL_TRANS_GET_PCIE_TRANS(trans);
if (!trans_pcie->ict_tbl)
- return 0;
+ return;
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
iwl_disable_interrupts(trans);
memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
@@ -1235,14 +1220,12 @@ int iwl_reset_ict(struct iwl_trans *trans)
IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
- iwl_write32(bus(trans), CSR_DRAM_INT_TBL_REG, val);
+ iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
trans_pcie->use_ict = true;
trans_pcie->ict_index = 0;
- iwl_write32(bus(trans), CSR_INT, trans_pcie->inta_mask);
+ iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
iwl_enable_interrupts(trans);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
-
- return 0;
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
/* Device is going down, disable ICT interrupt usage */
@@ -1253,9 +1236,9 @@ void iwl_disable_ict(struct iwl_trans *trans)
unsigned long flags;
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
trans_pcie->use_ict = false;
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
static irqreturn_t iwl_isr(int irq, void *data)
@@ -1270,21 +1253,21 @@ static irqreturn_t iwl_isr(int irq, void *data)
if (!trans)
return IRQ_NONE;
- trace_iwlwifi_dev_irq(priv(trans));
+ trace_iwlwifi_dev_irq(trans->dev);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
* If we have something to service, the tasklet will re-enable ints.
* If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(bus(trans), CSR_INT_MASK); /* just for debug */
- iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
+ inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
+ iwl_write32(trans, CSR_INT_MASK, 0x00000000);
/* Discover which interrupts are active/pending */
- inta = iwl_read32(bus(trans), CSR_INT);
+ inta = iwl_read32(trans, CSR_INT);
/* Ignore interrupt if there's nothing in NIC to service.
* This may be due to IRQ shared with another device,
@@ -1302,8 +1285,8 @@ static irqreturn_t iwl_isr(int irq, void *data)
}
#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) {
- inta_fh = iwl_read32(bus(trans), CSR_FH_INT_STATUS);
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
+ inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
"fh 0x%08x\n", inta, inta_mask, inta_fh);
}
@@ -1313,22 +1296,22 @@ static irqreturn_t iwl_isr(int irq, void *data)
/* iwl_irq_tasklet() will service interrupts and re-enable them */
if (likely(inta))
tasklet_schedule(&trans_pcie->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
+ else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta)
iwl_enable_interrupts(trans);
unplugged:
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return IRQ_HANDLED;
none:
/* re-enable interrupts here since we don't have anything to service. */
/* only Re-enable if disabled by irq and no tasklet was scheduled. */
- if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
+ if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta)
iwl_enable_interrupts(trans);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return IRQ_NONE;
}
@@ -1360,24 +1343,24 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
if (!trans_pcie->use_ict)
return iwl_isr(irq, data);
- trace_iwlwifi_dev_irq(priv(trans));
+ trace_iwlwifi_dev_irq(trans->dev);
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
* If we have something to service, the tasklet will re-enable ints.
* If we *don't* have something, we'll re-enable before leaving here.
*/
- inta_mask = iwl_read32(bus(trans), CSR_INT_MASK); /* just for debug */
- iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
+ inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
+ iwl_write32(trans, CSR_INT_MASK, 0x00000000);
/* Ignore interrupt if there's nothing in NIC to service.
* This may be due to IRQ shared with another device,
* or due to sporadic interrupts thrown from our NIC. */
read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index, read);
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
if (!read) {
IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
goto none;
@@ -1396,7 +1379,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index,
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
read);
} while (read);
@@ -1424,7 +1407,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
/* iwl_irq_tasklet() will service interrupts and re-enable them */
if (likely(inta))
tasklet_schedule(&trans_pcie->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
+ else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta) {
/* Allow interrupt if it was disabled by this handler and
* no tasklet was scheduled. We should not enable the interrupt,
@@ -1433,17 +1416,17 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
iwl_enable_interrupts(trans);
}
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return IRQ_HANDLED;
none:
/* re-enable interrupts here since we don't have anything to service.
* only Re-enable if disabled by irq.
*/
- if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
+ if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta)
iwl_enable_interrupts(trans);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return IRQ_NONE;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index bd29568177e6..e92972fd6ecf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -35,11 +35,49 @@
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
+#include "iwl-op-mode.h"
#include "iwl-trans-pcie-int.h"
#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following numbers, which we get from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ * VO 0
+ * VI 1
+ * BE 2
+ * BK 3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific code), the AC->hw
+ * queue mapping is the identity mapping.
+ */
+
+static const u8 tid_to_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO
+};
+
+
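The mapping described in the comment above is two small steps: TID to AC through tid_to_ac[], then AC to hardware queue, which is the identity for regular (non-aggregated) frames. A hedged illustration using get_ac_from_tid(), which this same patch adds further down; example_default_txq_for_tid() is a made-up helper:

/* Illustration only: choose the default (non-aggregation) hw queue for a TID. */
static int example_default_txq_for_tid(u16 tid)
{
	int ac = get_ac_from_tid(tid);	/* TID 0..7 -> IEEE80211_AC_* */

	if (ac < 0)
		return -EINVAL;		/* TIDs 8-15 are not handled here */

	return ac;			/* AC -> hw queue is the identity mapping */
}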
/**
* iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
*/
@@ -98,9 +136,9 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
if (txq->need_update == 0)
return;
- if (hw_params(trans).shadow_reg_enable) {
+ if (cfg(trans)->base_params->shadow_reg_enable) {
/* shadow register enabled */
- iwl_write32(bus(trans), HBUS_TARG_WRPTR,
+ iwl_write32(trans, HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
} else {
/* if we're trying to save power */
@@ -108,18 +146,18 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
/* wake up nic if it's powered down ...
* uCode will wake up, and interrupt us again, so next
* time we'll skip this part. */
- reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);
+ reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
IWL_DEBUG_INFO(trans,
"Tx queue %d requesting wakeup,"
" GP1 = 0x%x\n", txq_id, reg);
- iwl_set_bit(bus(trans), CSR_GP_CNTRL,
+ iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
return;
}
- iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
+ iwl_write_direct32(trans, HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
/*
@@ -128,7 +166,7 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
* trying to tx (during RFKILL, we're not trying to tx).
*/
} else
- iwl_write32(bus(trans), HBUS_TARG_WRPTR,
+ iwl_write32(trans, HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
}
txq->need_update = 0;
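iwl_txq_update_write_ptr() is the publish step of the write-pointer handshake spelled out in the comments: with shadow registers the pointer is written directly, otherwise the driver may first have to wake the NIC through CSR_GP_CNTRL and let the wake-up interrupt retry the write. A hedged caller-side sketch, assuming iwl_queue_inc_wrap() increments modulo q->n_bd as it is used elsewhere in this driver; example_publish_tfd() is hypothetical:

/* Sketch: after filling a new TFD, advance and publish the write pointer. */
static void example_publish_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	txq->q.write_ptr = iwl_queue_inc_wrap(txq->q.write_ptr, txq->q.n_bd);
	txq->need_update = 1;			/* force the register update */
	iwl_txq_update_write_ptr(trans, txq);	/* may only wake the NIC and defer */
}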
@@ -190,14 +228,14 @@ static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
/* Unmap tx_cmd */
if (num_tbs)
- dma_unmap_single(bus(trans)->dev,
+ dma_unmap_single(trans->dev,
dma_unmap_addr(meta, mapping),
dma_unmap_len(meta, len),
DMA_BIDIRECTIONAL);
/* Unmap chunks, if any. */
for (i = 1; i < num_tbs; i++)
- dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
+ dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
iwl_tfd_tb_get_len(tfd, i), dma_dir);
}
@@ -216,6 +254,8 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
{
struct iwl_tfd *tfd_tmp = txq->tfds;
+ lockdep_assert_held(&txq->lock);
+
iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);
/* free SKB */
@@ -229,7 +269,7 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
* freed and that the queue is not empty - free the skb
*/
if (skb) {
- iwl_free_skb(priv(trans), skb);
+ iwl_op_mode_free_skb(trans->op_mode, skb);
txq->skbs[index] = NULL;
}
}
@@ -357,7 +397,7 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
- if (txq_id != trans->shrd->cmd_queue)
+ if (txq_id != trans_pcie->cmd_queue)
sta_id = tx_cmd->sta_id;
bc_ent = cpu_to_le16(1 | (sta_id << 12));
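For orientation, the invalidate entry written above packs the station id into bits 12-15 and the dummy length 1 into the low bits; with sta_id == 5 (a made-up value) the host-order result is 1 | (5 << 12) = 0x5001, stored little-endian by cpu_to_le16(). A standalone check of that arithmetic:

/* Userspace check of the packing shown above (host-order value only). */
#include <stdio.h>

int main(void)
{
	unsigned int sta_id = 5;			/* example value */
	unsigned int bc_ent = 1 | (sta_id << 12);	/* invalidate entry */

	printf("bc_ent = 0x%04x\n", bc_ent);		/* prints 0x5001 */
	return 0;
}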
@@ -383,14 +423,14 @@ static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
tbl_dw_addr = trans_pcie->scd_base_addr +
SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
- tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);
+ tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);
if (txq_id & 0x1)
tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
else
tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
- iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);
+ iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);
return 0;
}
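iwlagn_tx_queue_set_q2ratid() shows the layout of the scheduler translation table: each 32-bit word holds the RA/TID value for a pair of queues, even-numbered queues in the low 16 bits and odd-numbered queues in the high 16 bits, so only this queue's half of the word is rewritten. A worked example with made-up values:

/* Worked example (hypothetical values):
 *   existing word:  tbl_dw = 0x11112222, new scd_q2ratid = 0x0234
 *   txq_id = 11 (odd)  -> tbl_dw = (0x0234 << 16) | (tbl_dw & 0x0000FFFF) = 0x02342222
 *   txq_id = 10 (even) -> tbl_dw =  0x0234        | (tbl_dw & 0xFFFF0000) = 0x11110234
 */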
@@ -399,7 +439,7 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
/* Simply stop the queue, but don't change any configuration;
* the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
- iwl_write_prph(bus(trans),
+ iwl_write_prph(trans,
SCD_QUEUE_STATUS_BITS(txq_id),
(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
@@ -409,9 +449,9 @@ void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
int txq_id, u32 index)
{
IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff);
- iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
+ iwl_write_direct32(trans, HBUS_TARG_WRPTR,
(index & 0xff) | (txq_id << 8));
- iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
+ iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
}
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
@@ -423,7 +463,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
int active =
test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;
- iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
+ iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
@@ -431,9 +471,21 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
txq->sched_retry = scd_retry;
- IWL_DEBUG_TX_QUEUES(trans, "%s %s Queue %d on FIFO %d\n",
- active ? "Activate" : "Deactivate",
- scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
+ if (active)
+ IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n",
+ scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
+ else
+ IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n",
+ scd_retry ? "BA" : "AC/CMD", txq_id);
+}
+
+static inline int get_ac_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return tid_to_ac[tid];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
}
static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
@@ -478,7 +530,7 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
}
txq_id = trans_pcie->agg_txq[sta_id][tid];
- if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
+ if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
IWL_ERR(trans,
"queue number out of range: %d, must be %d to %d\n",
txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
@@ -489,7 +541,7 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
ra_tid = BUILD_RAxTID(sta_id, tid);
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
/* Stop this Tx queue before configuring it */
iwlagn_tx_queue_stop_scheduler(trans, txq_id);
@@ -498,10 +550,10 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
/* Set this queue as a chain-building queue */
- iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));
+ iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1<<txq_id));
/* enable aggregations for the queue */
- iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));
+ iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1<<txq_id));
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
@@ -510,7 +562,7 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
iwl_trans_set_wr_ptrs(trans, txq_id, ssn);
/* Set up Tx window size and frame limit for this queue */
- iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
+ iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
sizeof(u32),
((frame_limit <<
@@ -520,7 +572,7 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
- iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
+ iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
@@ -529,7 +581,7 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
trans_pcie->txq[txq_id].sta_id = sta_id;
trans_pcie->txq[txq_id].tid = tid;
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
/*
@@ -543,7 +595,8 @@ static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int txq_id;
- for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
+ for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+ txq_id++)
if (!test_and_set_bit(txq_id,
&trans_pcie->txq_ctx_active_msk))
return txq_id;
@@ -573,7 +626,7 @@ int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u8 txq_id = trans_pcie->agg_txq[sta_id][tid];
- if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
+ if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
IWL_ERR(trans,
"queue number out of range: %d, must be %d to %d\n",
txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
@@ -584,7 +637,7 @@ int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
iwlagn_tx_queue_stop_scheduler(trans, txq_id);
- iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
+ iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id));
trans_pcie->agg_txq[sta_id][tid] = 0;
trans_pcie->txq[txq_id].q.read_ptr = 0;
@@ -592,7 +645,7 @@ int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
/* assumes that ssn_idx is valid (!= 0xFFF) */
iwl_trans_set_wr_ptrs(trans, txq_id, 0);
- iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
+ iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
iwl_txq_ctx_deactivate(trans_pcie, txq_id);
iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
return 0;
@@ -612,15 +665,13 @@ int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
+ struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
dma_addr_t phys_addr;
- unsigned long flags;
u32 idx;
u16 copy_size, cmd_size;
- bool is_ct_kill = false;
bool had_nocopy = false;
int i;
u8 *cmd_dest;
@@ -635,12 +686,6 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
return -EIO;
}
- if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
- !(cmd->flags & CMD_ON_DEMAND)) {
- IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
- return -EIO;
- }
-
copy_size = sizeof(out_cmd->hdr);
cmd_size = sizeof(out_cmd->hdr);
@@ -670,23 +715,13 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
return -EINVAL;
- if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
- IWL_WARN(trans, "Not sending command - %s KILL\n",
- iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
- return -EIO;
- }
-
- spin_lock_irqsave(&trans->hcmd_lock, flags);
+ spin_lock_bh(&txq->lock);
if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- spin_unlock_irqrestore(&trans->hcmd_lock, flags);
+ spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
- is_ct_kill = iwl_check_for_ct_kill(priv(trans));
- if (!is_ct_kill) {
- IWL_ERR(trans, "Restarting adapter queue is full\n");
- iwlagn_fw_error(priv(trans), false);
- }
+ iwl_op_mode_cmd_queue_full(trans->op_mode);
return -ENOSPC;
}
@@ -703,7 +738,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
out_cmd->hdr.cmd = cmd->id;
out_cmd->hdr.flags = 0;
out_cmd->hdr.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
INDEX_TO_SEQ(q->write_ptr));
/* and copy the data that needs to be copied */
@@ -723,11 +758,11 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
get_cmd_string(out_cmd->hdr.cmd),
out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
- q->write_ptr, idx, trans->shrd->cmd_queue);
+ q->write_ptr, idx, trans_pcie->cmd_queue);
- phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
+ phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
+ if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
idx = -ENOMEM;
goto out;
}
@@ -748,10 +783,10 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
continue;
if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
continue;
- phys_addr = dma_map_single(bus(trans)->dev,
+ phys_addr = dma_map_single(trans->dev,
(void *)cmd->data[i],
cmd->len[i], DMA_BIDIRECTIONAL);
- if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
+ if (dma_mapping_error(trans->dev, phys_addr)) {
iwlagn_unmap_tfd(trans, out_meta,
&txq->tfds[q->write_ptr],
DMA_BIDIRECTIONAL);
@@ -775,7 +810,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
/* check that tracing gets all possible blocks */
BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
- trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
+ trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
trace_bufs[0], trace_lens[0],
trace_bufs[1], trace_lens[1],
trace_bufs[2], trace_lens[2]);
@@ -786,7 +821,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
iwl_txq_update_write_ptr(trans, txq);
out:
- spin_unlock_irqrestore(&trans->hcmd_lock, flags);
+ spin_unlock_bh(&txq->lock);
return idx;
}
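iwl_enqueue_hcmd() copies the header plus every fragment that is not marked IWL_HCMD_DFL_NOCOPY into the command slot, and DMA-maps NOCOPY fragments straight from the caller's buffer. A hedged usage sketch built only from the struct iwl_host_cmd fields this function touches (id, flags, len[], data[], dataflags[]); EXAMPLE_CMD_ID, struct example_payload and example_send_nocopy() are placeholders:

/* Sketch: send a command whose large payload should not be memcpy'd. */
static int example_send_nocopy(struct iwl_trans *trans,
			       struct example_payload *payload)
{
	struct iwl_host_cmd cmd = {
		.id = EXAMPLE_CMD_ID,
		.flags = CMD_ASYNC,
		.data = { payload, },
		.len = { sizeof(*payload), },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};

	/* iwl_trans_send_cmd() (named in the comments above) eventually
	 * lands in iwl_enqueue_hcmd() for the PCIe transport. */
	return iwl_trans_send_cmd(trans, &cmd);
}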
@@ -805,6 +840,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
struct iwl_queue *q = &txq->q;
int nfreed = 0;
+ lockdep_assert_held(&txq->lock);
+
if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
"index %d is out of range [0-%d] %d %d.\n", __func__,
@@ -818,7 +855,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
q->write_ptr, q->read_ptr);
- iwlagn_fw_error(priv(trans), false);
+ iwl_op_mode_nic_error(trans->op_mode);
}
}
@@ -834,7 +871,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
* will be executed. The attached skb (if present) will only be freed
* if the callback returns 1
*/
-void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
+void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
int handler_status)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -845,21 +882,22 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
- unsigned long flags;
+ struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
/* If a Tx command is being handled and it isn't in the actual
* command queue then a command routing bug has been introduced
* in the queue management code. */
- if (WARN(txq_id != trans->shrd->cmd_queue,
+ if (WARN(txq_id != trans_pcie->cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans->shrd->cmd_queue, sequence,
- trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
- trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
+ txq_id, trans_pcie->cmd_queue, sequence,
+ trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
+ trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
}
+ spin_lock(&txq->lock);
+
cmd_index = get_cmd_index(&txq->q, index);
cmd = txq->cmd[cmd_index];
meta = &txq->meta[cmd_index];
@@ -871,13 +909,14 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
- meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+ struct page *p = rxb_steal_page(rxb);
+
+ meta->source->resp_pkt = pkt;
+ meta->source->_rx_page_addr = (unsigned long)page_address(p);
+ meta->source->_rx_page_order = hw_params(trans).rx_page_order;
meta->source->handler_status = handler_status;
- rxb->page = NULL;
}
- spin_lock_irqsave(&trans->hcmd_lock, flags);
-
iwl_hcmd_queue_reclaim(trans, txq_id, index);
if (!(meta->flags & CMD_ASYNC)) {
@@ -889,12 +928,12 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->hdr.cmd));
- wake_up(&trans->shrd->wait_command_queue);
+ wake_up(&trans->wait_command_queue);
}
meta->flags = 0;
- spin_unlock_irqrestore(&trans->hcmd_lock, flags);
+ spin_unlock(&txq->lock);
}
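This hunk replaces the old reply_page plumbing with resp_pkt and the stolen rx page: when CMD_WANT_SKB is set, iwl_tx_cmd_complete() hands the response packet to the waiter, which must release it with iwl_free_resp(), as iwl_send_cmd_sync() does below. A hedged caller-side sketch; EXAMPLE_CMD_ID and the response handling are placeholders:

/* Sketch: synchronous command that wants the response packet back. */
static int example_query(struct iwl_trans *trans)
{
	struct iwl_host_cmd cmd = {
		.id = EXAMPLE_CMD_ID,
		.flags = CMD_WANT_SKB,	/* ask for resp_pkt to be filled in */
	};
	int ret;

	ret = iwl_trans_send_cmd(trans, &cmd);
	if (ret)
		return ret;

	/* cmd.resp_pkt now points into the page stolen by iwl_tx_cmd_complete() */

	iwl_free_resp(&cmd);		/* release the page, as iwl_send_cmd_sync() does */
	return 0;
}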
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
@@ -908,12 +947,9 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
return -EINVAL;
- if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
- return -EBUSY;
-
ret = iwl_enqueue_hcmd(trans, cmd);
if (ret < 0) {
- IWL_DEBUG_QUIET_RFKILL(trans,
+ IWL_ERR(trans,
"Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(cmd->id), ret);
return ret;
@@ -927,26 +963,22 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
int cmd_idx;
int ret;
- lockdep_assert_held(&trans->shrd->mutex);
-
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
get_cmd_string(cmd->id));
- if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
- return -EBUSY;
-
-
- if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
- IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
- get_cmd_string(cmd->id));
- return -ECANCELED;
- }
if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
IWL_ERR(trans, "Command %s failed: FW Error\n",
get_cmd_string(cmd->id));
return -EIO;
}
- set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+
+ if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
+ &trans->shrd->status))) {
+ IWL_ERR(trans, "Command %s: a command is already active!\n",
+ get_cmd_string(cmd->id));
+ return -EIO;
+ }
+
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));
@@ -954,27 +986,27 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (cmd_idx < 0) {
ret = cmd_idx;
clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
- IWL_DEBUG_QUIET_RFKILL(trans,
+ IWL_ERR(trans,
"Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(cmd->id), ret);
return ret;
}
- ret = wait_event_timeout(trans->shrd->wait_command_queue,
+ ret = wait_event_timeout(trans->wait_command_queue,
!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
HOST_COMPLETE_TIMEOUT);
if (!ret) {
if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
struct iwl_tx_queue *txq =
- &trans_pcie->txq[trans->shrd->cmd_queue];
+ &trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_queue *q = &txq->q;
- IWL_DEBUG_QUIET_RFKILL(trans,
+ IWL_ERR(trans,
"Error sending %s: time out after %dms.\n",
get_cmd_string(cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
- IWL_DEBUG_QUIET_RFKILL(trans,
+ IWL_ERR(trans,
"Current CMD queue read_ptr %d write_ptr %d\n",
q->read_ptr, q->write_ptr);
@@ -986,7 +1018,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
}
}
- if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
IWL_ERR(trans, "Error: Response NULL in '%s'\n",
get_cmd_string(cmd->id));
ret = -EIO;
@@ -1003,13 +1035,13 @@ cancel:
* in later, it will possibly set an invalid
* address (cmd->meta.source).
*/
- trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
+ trans_pcie->txq[trans_pcie->cmd_queue].meta[cmd_idx].flags &=
~CMD_WANT_SKB;
}
- if (cmd->reply_page) {
- iwl_free_pages(trans->shrd, cmd->reply_page);
- cmd->reply_page = 0;
+ if (cmd->resp_pkt) {
+ iwl_free_resp(cmd);
+ cmd->resp_pkt = NULL;
}
return ret;
@@ -1034,9 +1066,11 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
int freed = 0;
/* This function is not meant to release cmd queue*/
- if (WARN_ON(txq_id == trans->shrd->cmd_queue))
+ if (WARN_ON(txq_id == trans_pcie->cmd_queue))
return 0;
+ lockdep_assert_held(&txq->lock);
+
/*Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
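The comment above is easier to see with numbers: freeing runs up to but not including index, so the last entry actually freed is the one before it, wrapping at q->n_bd. Assuming iwl_queue_dec_wrap(i, n) decrements modulo n, consistent with how the inc/dec wrap helpers are used throughout this driver:

/* Worked example (assuming iwl_queue_dec_wrap(i, n) == (i + n - 1) % n):
 *   q->n_bd = 256, index = 5  ->  last_to_free = 4
 *   q->n_bd = 256, index = 0  ->  last_to_free = 255 (wrap-around)
 */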
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 324d06dfb690..b4f796c82e1e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -60,8 +60,11 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
+#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
@@ -73,12 +76,18 @@
#include "iwl-eeprom.h"
#include "iwl-agn-hw.h"
+#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+
+#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
+ (((1<<cfg(trans)->base_params->num_of_queues) - 1) &\
+ (~(1<<(trans_pcie)->cmd_queue)))
+
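IWL_MASK(lo, hi) builds a contiguous mask covering bits lo..hi inclusive, and SCD_QUEUECHAIN_SEL_ALL() is every configured queue except the command queue. A standalone userspace check of the arithmetic, with hypothetical queue numbers:

#include <stdio.h>

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

int main(void)
{
	unsigned int num_of_queues = 20, cmd_queue = 9;	/* example values */
	unsigned int sel_all = ((1u << num_of_queues) - 1) & ~(1u << cmd_queue);

	printf("IWL_MASK(0, 3) = 0x%x\n", IWL_MASK(0, 3));	/* 0xf   (bits 0-3) */
	printf("IWL_MASK(4, 7) = 0x%x\n", IWL_MASK(4, 7));	/* 0xf0  (bits 4-7) */
	printf("sel_all        = 0x%x\n", sel_all);		/* 0xffdff (bit 9 clear) */
	return 0;
}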
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- struct device *dev = bus(trans)->dev;
+ struct device *dev = trans->dev;
memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
@@ -122,7 +131,7 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
/* In the reset function, these buffers may have been allocated
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].page != NULL) {
- dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
+ dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
PAGE_SIZE << hw_params(trans).rx_page_order,
DMA_FROM_DEVICE);
__free_pages(rxq->pool[i].page,
@@ -146,17 +155,17 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
/* Stop Rx DMA */
- iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
/* Reset driver's Rx queue write index */
- iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
/* Tell device where to find RBD circular buffer in DRAM */
- iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
(u32)(rxq->bd_dma >> 8));
/* Tell device where in DRAM to update its Rx status */
- iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
rxq->rb_stts_dma >> 4);
/* Enable Rx DMA
@@ -167,7 +176,7 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
* RB timeout 0x10
* 256 RBDs
*/
- iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
@@ -177,7 +186,7 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
/* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(bus(trans), CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
static int iwl_rx_init(struct iwl_trans *trans)
@@ -215,10 +224,10 @@ static int iwl_rx_init(struct iwl_trans *trans)
iwl_trans_rx_hw_init(trans, rxq);
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
rxq->need_update = 1;
iwl_rx_queue_update_write_ptr(trans, rxq);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return 0;
}
@@ -242,13 +251,13 @@ static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
iwl_trans_rxq_free_rx_bufs(trans);
spin_unlock_irqrestore(&rxq->lock, flags);
- dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
rxq->bd, rxq->bd_dma);
memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
rxq->bd = NULL;
if (rxq->rb_stts)
- dma_free_coherent(bus(trans)->dev,
+ dma_free_coherent(trans->dev,
sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
else
@@ -261,8 +270,8 @@ static int iwl_trans_rx_stop(struct iwl_trans *trans)
{
/* stop Rx DMA */
- iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- return iwl_poll_direct_bit(bus(trans), FH_MEM_RSSR_RX_STATUS_REG,
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
@@ -272,7 +281,7 @@ static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
if (WARN_ON(ptr->addr))
return -EINVAL;
- ptr->addr = dma_alloc_coherent(bus(trans)->dev, size,
+ ptr->addr = dma_alloc_coherent(trans->dev, size,
&ptr->dma, GFP_KERNEL);
if (!ptr->addr)
return -ENOMEM;
@@ -286,7 +295,7 @@ static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
if (unlikely(!ptr->addr))
return;
- dma_free_coherent(bus(trans)->dev, ptr->size, ptr->addr, ptr->dma);
+ dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
memset(ptr, 0, sizeof(*ptr));
}
@@ -296,6 +305,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
{
size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
int i;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
return -EINVAL;
@@ -308,7 +318,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
if (!txq->meta || !txq->cmd)
goto error;
- if (txq_id == trans->shrd->cmd_queue)
+ if (txq_id == trans_pcie->cmd_queue)
for (i = 0; i < slots_num; i++) {
txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
GFP_KERNEL);
@@ -319,7 +329,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
/* Alloc driver data array and TFD circular buffer */
/* Driver private data, only for Tx (not command) queues,
* not shared with device. */
- if (txq_id != trans->shrd->cmd_queue) {
+ if (txq_id != trans_pcie->cmd_queue) {
txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
GFP_KERNEL);
if (!txq->skbs) {
@@ -333,7 +343,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
/* Circular buffer of transmit frame descriptors (TFDs),
* shared with device */
- txq->tfds = dma_alloc_coherent(bus(trans)->dev, tfd_sz,
+ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
&txq->q.dma_addr, GFP_KERNEL);
if (!txq->tfds) {
IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
@@ -347,7 +357,7 @@ error:
txq->skbs = NULL;
/* since txq->cmd has been zeroed,
* all non allocated cmd[i] will be NULL */
- if (txq->cmd && txq_id == trans->shrd->cmd_queue)
+ if (txq->cmd && txq_id == trans_pcie->cmd_queue)
for (i = 0; i < slots_num; i++)
kfree(txq->cmd[i]);
kfree(txq->meta);
@@ -385,11 +395,13 @@ static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
if (ret)
return ret;
+ spin_lock_init(&txq->lock);
+
/*
* Tell nic where to find circular buffer of Tx Frame Descriptors for
* given Tx queue, and enable the DMA channel used for that queue.
* Circular buffer (TFD queue in DRAM) physical base address */
- iwl_write_direct32(bus(trans), FH_MEM_CBBC_QUEUE(txq_id),
+ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
txq->q.dma_addr >> 8);
return 0;
@@ -404,8 +416,6 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
struct iwl_queue *q = &txq->q;
enum dma_data_direction dma_dir;
- unsigned long flags;
- spinlock_t *lock;
if (!q->n_bd)
return;
@@ -413,22 +423,19 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
/* In the command queue, all the TBs are mapped as BIDI
* so unmap them as such.
*/
- if (txq_id == trans->shrd->cmd_queue) {
+ if (txq_id == trans_pcie->cmd_queue)
dma_dir = DMA_BIDIRECTIONAL;
- lock = &trans->hcmd_lock;
- } else {
+ else
dma_dir = DMA_TO_DEVICE;
- lock = &trans->shrd->sta_lock;
- }
- spin_lock_irqsave(lock, flags);
+ spin_lock_bh(&txq->lock);
while (q->write_ptr != q->read_ptr) {
/* The read_ptr needs to bound by q->n_window */
iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
dma_dir);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
}
- spin_unlock_irqrestore(lock, flags);
+ spin_unlock_bh(&txq->lock);
}
/**
@@ -443,7 +450,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
- struct device *dev = bus(trans)->dev;
+ struct device *dev = trans->dev;
int i;
if (WARN_ON(!txq))
return;
@@ -452,7 +459,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
/* De-alloc array of command/tx buffers */
- if (txq_id == trans->shrd->cmd_queue)
+ if (txq_id == trans_pcie->cmd_queue)
for (i = 0; i < txq->q.n_window; i++)
kfree(txq->cmd[i]);
@@ -490,7 +497,7 @@ static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
/* Tx queues */
if (trans_pcie->txq) {
for (txq_id = 0;
- txq_id < hw_params(trans).max_txq_num; txq_id++)
+ txq_id < cfg(trans)->base_params->num_of_queues; txq_id++)
iwl_tx_queue_free(trans, txq_id);
}
@@ -515,7 +522,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
int txq_id, slots_num;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
+ u16 scd_bc_tbls_size = cfg(trans)->base_params->num_of_queues *
sizeof(struct iwlagn_scd_bc_tbl);
/*It is not allowed to alloc twice, so warn when this happens.
@@ -539,7 +546,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
goto error;
}
- trans_pcie->txq = kcalloc(hw_params(trans).max_txq_num,
+ trans_pcie->txq = kcalloc(cfg(trans)->base_params->num_of_queues,
sizeof(struct iwl_tx_queue), GFP_KERNEL);
if (!trans_pcie->txq) {
IWL_ERR(trans, "Not enough memory for txq\n");
@@ -548,8 +555,9 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
}
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
- slots_num = (txq_id == trans->shrd->cmd_queue) ?
+ for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+ txq_id++) {
+ slots_num = (txq_id == trans_pcie->cmd_queue) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
slots_num, txq_id);
@@ -581,20 +589,21 @@ static int iwl_tx_init(struct iwl_trans *trans)
alloc = true;
}
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
/* Turn off all Tx DMA fifos */
- iwl_write_prph(bus(trans), SCD_TXFACT, 0);
+ iwl_write_prph(trans, SCD_TXFACT, 0);
/* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(bus(trans), FH_KW_MEM_ADDR_REG,
+ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
trans_pcie->kw.dma >> 4);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
- slots_num = (txq_id == trans->shrd->cmd_queue) ?
+ for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+ txq_id++) {
+ slots_num = (txq_id == trans_pcie->cmd_queue) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
slots_num, txq_id);
@@ -619,49 +628,220 @@ static void iwl_set_pwr_vmain(struct iwl_trans *trans)
* to set power to V_AUX, do:
if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
- iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
+ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
~APMG_PS_CTRL_MSK_PWR_SRC);
*/
- iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
+ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
~APMG_PS_CTRL_MSK_PWR_SRC);
}
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
+{
+ int pos;
+ u16 pci_lnk_ctl;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ struct pci_dev *pci_dev = trans_pcie->pci_dev;
+
+ pos = pci_pcie_cap(pci_dev);
+ pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+ return pci_lnk_ctl;
+}
+
+static void iwl_apm_config(struct iwl_trans *trans)
+{
+ /*
+ * HW bug W/A for instability in PCIe bus L0S->L1 transition.
+ * Check if BIOS (or OS) enabled L1-ASPM on this device.
+ * If so (likely), disable L0S, so device moves directly L0->L1;
+ * costs negligible amount of power savings.
+ * If not (unlikely), enable L0S, so there is at least some
+ * power savings, even without L1.
+ */
+ u16 lctl = iwl_pciexp_link_ctrl(trans);
+
+ if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+ PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+ /* L1-ASPM enabled; disable(!) L0S */
+ iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+ dev_printk(KERN_INFO, trans->dev,
+ "L1 Enabled; Disabling L0S\n");
+ } else {
+ /* L1-ASPM disabled; enable(!) L0S */
+ iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+ dev_printk(KERN_INFO, trans->dev,
+ "L1 Disabled; Enabling L0S\n");
+ }
+ trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+}
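The workaround above reduces to a single test on the Link Control word read by iwl_pciexp_link_ctrl(), using the PCI_CFG_LINK_CTRL_VAL_* bits defined earlier in this hunk. A worked example over the two ASPM bits (other LNKCTL bits ignored for clarity):

/* Worked example (only the two low ASPM bits shown):
 *   lctl = 0x02 (L1 on, L0S off) -> "L1 Enabled; Disabling L0S",  pm_support = true
 *   lctl = 0x01 (L1 off, L0S on) -> "L1 Disabled; Enabling L0S",  pm_support = false
 *   lctl = 0x03 (both on)        -> L1 branch taken,              pm_support = false
 * pm_support is always !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN).
 */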
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+static int iwl_apm_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret = 0;
+ IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
+
+ /*
+ * Use "set_bit" below rather than "write", to preserve any hardware
+ * bits already set by default after reset.
+ */
+
+ /* Disable L0S exit timer (platform NMI Work/Around) */
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+ /*
+ * Disable L0s without affecting L1;
+ * don't wait for ICH L0s (ICH bug W/A)
+ */
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+ /*
+ * Enable HAP INTA (interrupt from management bus) to
+ * wake device's PCI Express link L1a -> L0s
+ */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+ iwl_apm_config(trans);
+
+ /* Configure analog phase-lock-loop before activating to D0A */
+ if (cfg(trans)->base_params->pll_cfg_val)
+ iwl_set_bit(trans, CSR_ANA_PLL_CFG,
+ cfg(trans)->base_params->pll_cfg_val);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. iwl_write_prph()
+ * and accesses to uCode SRAM.
+ */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ if (ret < 0) {
+ IWL_DEBUG_INFO(trans, "Failed to init the card\n");
+ goto out;
+ }
+
+ /*
+ * Enable DMA clock and wait for it to stabilize.
+ *
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+ * do not disable clocks. This preserves any hardware bits already
+ * set by default in "CLK_CTRL_REG" after reset.
+ */
+ iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
+
+ /* Disable L1-Active */
+ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+ set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+
+out:
+ return ret;
+}
+
+static int iwl_apm_stop_master(struct iwl_trans *trans)
+{
+ int ret = 0;
+
+ /* stop device's busmaster DMA activity */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+ ret = iwl_poll_bit(trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+ if (ret)
+ IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
+
+ IWL_DEBUG_INFO(trans, "stop master\n");
+
+ return ret;
+}
+
+static void iwl_apm_stop(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
+
+ clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+
+ /* Stop device's DMA activity */
+ iwl_apm_stop_master(trans);
+
+ /* Reset the entire device */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+
static int iwl_nic_init(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long flags;
/* nic_init */
- spin_lock_irqsave(&trans->shrd->lock, flags);
- iwl_apm_init(priv(trans));
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ iwl_apm_init(trans);
/* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(bus(trans), CSR_INT_COALESCING,
+ iwl_write8(trans, CSR_INT_COALESCING,
IWL_HOST_INT_CALIB_TIMEOUT_DEF);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
iwl_set_pwr_vmain(trans);
- iwl_nic_config(priv(trans));
+ iwl_op_mode_nic_config(trans->op_mode);
+#ifndef CONFIG_IWLWIFI_IDI
/* Allocate the RX queue, or reset if it is already allocated */
iwl_rx_init(trans);
+#endif
/* Allocate or reset and init all Tx and Command queues */
if (iwl_tx_init(trans))
return -ENOMEM;
- if (hw_params(trans).shadow_reg_enable) {
+ if (cfg(trans)->base_params->shadow_reg_enable) {
/* enable shadow regs in HW */
- iwl_set_bit(bus(trans), CSR_MAC_SHADOW_REG_CTRL,
+ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
0x800FFFFF);
}
- set_bit(STATUS_INIT, &trans->shrd->status);
-
return 0;
}
@@ -672,11 +852,11 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
{
int ret;
- iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
/* See if we got it */
- ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
HW_READY_TIMEOUT);
@@ -686,21 +866,22 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
}
/* Note: returns standard 0/-ERROR code */
-static int iwl_trans_pcie_prepare_card_hw(struct iwl_trans *trans)
+static int iwl_prepare_card_hw(struct iwl_trans *trans)
{
int ret;
IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
ret = iwl_set_hw_ready(trans);
+ /* If the card is ready, exit 0 */
if (ret >= 0)
return 0;
/* If HW is not ready, prepare the conditions to check again */
- iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PREPARE);
- ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
+ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
@@ -767,13 +948,90 @@ static const u8 iwlagn_pan_ac_to_queue[] = {
7, 6, 5, 4,
};
-static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
+/*
+ * ucode
+ */
+static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
+ const struct fw_desc *section)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ dma_addr_t phy_addr = section->p_addr;
+ u32 byte_cnt = section->len;
+ u32 dst_addr = section->offset;
+ int ret;
+
+ trans_pcie->ucode_write_complete = false;
+
+ iwl_write_direct32(trans,
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+
+ iwl_write_direct32(trans,
+ FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
+
+ iwl_write_direct32(trans,
+ FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+ phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+
+ iwl_write_direct32(trans,
+ FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+ (iwl_get_dma_hi_addr(phy_addr)
+ << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+
+ iwl_write_direct32(trans,
+ FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
+ FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+
+ iwl_write_direct32(trans,
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+
+ IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
+ section_num);
+ ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
+ trans_pcie->ucode_write_complete, 5 * HZ);
+ if (!ret) {
+ IWL_ERR(trans, "Could not load the [%d] uCode section\n",
+ section_num);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int iwl_load_given_ucode(struct iwl_trans *trans,
+ const struct fw_img *image)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
+ if (!image->sec[i].p_addr)
+ break;
+
+ ret = iwl_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Remove all resets to allow NIC to operate */
+ iwl_write32(trans, CSR_RESET, 0);
+
+ return 0;
+}
+
+static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw)
{
int ret;
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool hw_rfkill;
- trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;
@@ -783,26 +1041,23 @@ static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;
- if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
- iwl_trans_pcie_prepare_card_hw(trans)) {
+ /* This may fail if AMT took ownership of the device */
+ if (iwl_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
return -EIO;
}
/* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(bus(trans), CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
- else
- set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+ hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
- if (iwl_is_rfkill(trans->shrd)) {
- iwl_set_hw_rfkill_state(priv(trans), true);
- iwl_enable_interrupts(trans);
+ if (hw_rfkill) {
+ iwl_enable_rfkill_int(trans);
return -ERFKILL;
}
- iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
ret = iwl_nic_init(trans);
if (ret) {
@@ -811,31 +1066,37 @@ static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
}
/* make sure rfkill handshake bits are cleared */
- iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR,
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
/* clear (again), then enable host interrupts */
- iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
iwl_enable_interrupts(trans);
/* really make sure rfkill handshake bits are cleared */
- iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- return 0;
+ /* Load the given image to the HW */
+ return iwl_load_given_ucode(trans, fw);
}
/*
* Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- * must be called under priv->shrd->lock and mac access
+ * must be called under the irq lock and with MAC access
*/
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
- iwl_write_prph(bus(trans), SCD_TXFACT, mask);
+ struct iwl_trans_pcie __maybe_unused *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->irq_lock);
+
+ iwl_write_prph(trans, SCD_TXFACT, mask);
}
-static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
+static void iwl_tx_start(struct iwl_trans *trans)
{
const struct queue_to_fifo_ac *queue_to_fifo;
struct iwl_trans_pcie *trans_pcie =
@@ -845,49 +1106,50 @@ static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
int i, chan;
u32 reg_val;
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
trans_pcie->scd_base_addr =
- iwl_read_prph(bus(trans), SCD_SRAM_BASE_ADDR);
+ iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
/* reset context data memory */
for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
a += 4)
- iwl_write_targ_mem(bus(trans), a, 0);
+ iwl_write_targ_mem(trans, a, 0);
/* reset tx status memory */
for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
a += 4)
- iwl_write_targ_mem(bus(trans), a, 0);
+ iwl_write_targ_mem(trans, a, 0);
for (; a < trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
+ SCD_TRANS_TBL_OFFSET_QUEUE(
+ cfg(trans)->base_params->num_of_queues);
a += 4)
- iwl_write_targ_mem(bus(trans), a, 0);
+ iwl_write_targ_mem(trans, a, 0);
- iwl_write_prph(bus(trans), SCD_DRAM_BASE_ADDR,
+ iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
trans_pcie->scd_bc_tbls.dma >> 10);
/* Enable DMA channel */
for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
- iwl_write_direct32(bus(trans), FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+ iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
/* Update FH chicken bits */
- reg_val = iwl_read_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG);
- iwl_write_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG,
+ reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
+ iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
- iwl_write_prph(bus(trans), SCD_QUEUECHAIN_SEL,
- SCD_QUEUECHAIN_SEL_ALL(trans));
- iwl_write_prph(bus(trans), SCD_AGGR_SEL, 0);
+ iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
+ SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
+ iwl_write_prph(trans, SCD_AGGR_SEL, 0);
/* initiate the queues */
- for (i = 0; i < hw_params(trans).max_txq_num; i++) {
- iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(i), 0);
- iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, 0 | (i << 8));
- iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
+ for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++) {
+ iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
+ iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
+ iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(i), 0);
- iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
+ iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(i) +
sizeof(u32),
((SCD_WIN_SIZE <<
@@ -898,8 +1160,8 @@ static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
}
- iwl_write_prph(bus(trans), SCD_INTERRUPT_MASK,
- IWL_MASK(0, hw_params(trans).max_txq_num));
+ iwl_write_prph(trans, SCD_INTERRUPT_MASK,
+ IWL_MASK(0, cfg(trans)->base_params->num_of_queues));
/* Activate all Tx DMA/FIFO channels */
iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
@@ -910,7 +1172,7 @@ static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
else
queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
- iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);
+ iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);
/* make sure all queue are not stopped */
memset(&trans_pcie->queue_stopped[0], 0,
@@ -941,40 +1203,47 @@ static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
fifo, 0);
}
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
/* Enable L1-Active */
- iwl_clear_bits_prph(bus(trans), APMG_PCIDEV_STT_REG,
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
+static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
+{
+ iwl_reset_ict(trans);
+ iwl_tx_start(trans);
+}
+
/**
* iwlagn_txq_ctx_stop - Stop all Tx DMA channels
*/
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
- int ch, txq_id;
+ int ch, txq_id, ret;
unsigned long flags;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
iwl_trans_txq_set_sched(trans, 0);
/* Stop each Tx DMA channel, and wait for it to be idle */
for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
- iwl_write_direct32(bus(trans),
+ iwl_write_direct32(trans,
FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- if (iwl_poll_direct_bit(bus(trans), FH_TSSR_TX_STATUS_REG,
+ ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
- 1000))
+ 1000);
+ if (ret < 0)
IWL_ERR(trans, "Failing on timeout while stopping"
" DMA channel %d [0x%08x]", ch,
- iwl_read_direct32(bus(trans),
+ iwl_read_direct32(trans,
FH_TSSR_TX_STATUS_REG));
}
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
if (!trans_pcie->txq) {
IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
@@ -982,7 +1251,8 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
}
/* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
+ for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+ txq_id++)
iwl_tx_queue_unmap(trans, txq_id);
return 0;
@@ -994,9 +1264,9 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* tell the device to stop sending interrupts */
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
/* device going down, Stop using ICT table */
iwl_disable_ict(trans);
@@ -1008,36 +1278,50 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
* restart. So don't process again if the device is
* already dead.
*/
- if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
iwl_trans_tx_stop(trans);
+#ifndef CONFIG_IWLWIFI_IDI
iwl_trans_rx_stop(trans);
-
+#endif
/* Power-down device's busmaster DMA clocks */
- iwl_write_prph(bus(trans), APMG_CLK_DIS_REG,
+ iwl_write_prph(trans, APMG_CLK_DIS_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
udelay(5);
}
/* Make sure (redundant) we've released our request to stay awake */
- iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
- iwl_apm_stop(priv(trans));
+ iwl_apm_stop(trans);
/* Upon stop, the APM issues an interrupt if HW RF kill is set.
* Clean again the interrupt here
*/
- spin_lock_irqsave(&trans->shrd->lock, flags);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
/* wait to make sure we flush pending tasklet*/
- synchronize_irq(bus(trans)->irq);
+ synchronize_irq(trans_pcie->irq);
tasklet_kill(&trans_pcie->irq_tasklet);
+ cancel_work_sync(&trans_pcie->rx_replenish);
+
/* stop and reset the on-board processor */
- iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+}
+
+static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
+{
+ /* let the ucode operate on its own */
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+ iwl_disable_interrupts(trans);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -1092,6 +1376,8 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq = &trans_pcie->txq[txq_id];
q = &txq->q;
+ spin_lock(&txq->lock);
+
/* In AGG mode, the index in the ring must correspond to the WiFi
* sequence number. This is a HW requirement to help the SCD to parse
* the BA.
@@ -1134,11 +1420,11 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* Physical address of this Tx command's header (not MAC header!),
* within command buffer array. */
- txcmd_phys = dma_map_single(bus(trans)->dev,
+ txcmd_phys = dma_map_single(trans->dev,
&dev_cmd->hdr, firstlen,
DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(bus(trans)->dev, txcmd_phys)))
- return -1;
+ if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
+ goto out_err;
dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
dma_unmap_len_set(out_meta, len, firstlen);
@@ -1153,14 +1439,14 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
* if any (802.11 null frames have no payload). */
secondlen = skb->len - hdr_len;
if (secondlen > 0) {
- phys_addr = dma_map_single(bus(trans)->dev, skb->data + hdr_len,
+ phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
secondlen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
- dma_unmap_single(bus(trans)->dev,
+ if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
+ dma_unmap_single(trans->dev,
dma_unmap_addr(out_meta, mapping),
dma_unmap_len(out_meta, len),
DMA_BIDIRECTIONAL);
- return -1;
+ goto out_err;
}
}
@@ -1174,7 +1460,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
offsetof(struct iwl_tx_cmd, scratch);
/* take back ownership of DMA buffer to enable update */
- dma_sync_single_for_cpu(bus(trans)->dev, txcmd_phys, firstlen,
+ dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
DMA_BIDIRECTIONAL);
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
@@ -1182,16 +1468,14 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
le16_to_cpu(dev_cmd->hdr.sequence));
IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
/* Set up entry for this TFD in Tx byte-count array */
iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
- dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
+ dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
DMA_BIDIRECTIONAL);
- trace_iwlwifi_dev_tx(priv(trans),
+ trace_iwlwifi_dev_tx(trans->dev,
&((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
sizeof(struct iwl_tfd),
&dev_cmd->hdr, firstlen,
@@ -1212,46 +1496,77 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq->need_update = 1;
iwl_txq_update_write_ptr(trans, txq);
} else {
- iwl_stop_queue(trans, txq, "Queue is full");
+ iwl_stop_queue(trans, txq);
}
}
+ spin_unlock(&txq->lock);
return 0;
+ out_err:
+ spin_unlock(&txq->lock);
+ return -1;
}
-static void iwl_trans_pcie_kick_nic(struct iwl_trans *trans)
-{
- /* Remove all resets to allow NIC to operate */
- iwl_write32(bus(trans), CSR_RESET, 0);
-}
-
-static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
int err;
+ bool hw_rfkill;
trans_pcie->inta_mask = CSR_INI_SET_MASK;
- tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
- iwl_irq_tasklet, (unsigned long)trans);
+ if (!trans_pcie->irq_requested) {
+ tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
+ iwl_irq_tasklet, (unsigned long)trans);
- iwl_alloc_isr_ict(trans);
+ iwl_alloc_isr_ict(trans);
- err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
- DRV_NAME, trans);
+ err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
+ DRV_NAME, trans);
+ if (err) {
+ IWL_ERR(trans, "Error allocating IRQ %d\n",
+ trans_pcie->irq);
+ goto error;
+ }
+
+ INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
+ trans_pcie->irq_requested = true;
+ }
+
+ err = iwl_prepare_card_hw(trans);
if (err) {
- IWL_ERR(trans, "Error allocating IRQ %d\n", bus(trans)->irq);
- iwl_free_isr_ict(trans);
- return err;
+ IWL_ERR(trans, "Error while preparing HW: %d", err);
+ goto err_free_irq;
}
- INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
- return 0;
+ iwl_apm_init(trans);
+
+ hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+
+ return err;
+
+err_free_irq:
+ free_irq(trans_pcie->irq, trans);
+error:
+ iwl_free_isr_ict(trans);
+ tasklet_kill(&trans_pcie->irq_tasklet);
+ return err;
+}
+
+static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans)
+{
+ iwl_apm_stop(trans);
+
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+ /* Even if we stop the HW, we still want the RF kill interrupt */
+ iwl_enable_rfkill_int(trans);
}
static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
- int txq_id, int ssn, u32 status,
- struct sk_buff_head *skbs)
+ int txq_id, int ssn, struct sk_buff_head *skbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
@@ -1259,6 +1574,8 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
int tfd_num = ssn & (txq->q.n_bd - 1);
int freed = 0;
+ spin_lock(&txq->lock);
+
txq->time_stamp = jiffies;
if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
@@ -1273,6 +1590,7 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
"agg_txq[sta_id[tid] %d", txq_id,
trans_pcie->agg_txq[sta_id][tid]);
+ spin_unlock(&txq->lock);
return 1;
}
@@ -1281,115 +1599,90 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
tfd_num, ssn);
freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
- (!txq->sched_retry ||
- status != TX_STATUS_FAIL_PASSIVE_NO_RX))
- iwl_wake_queue(trans, txq, "Packets reclaimed");
+ if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+ iwl_wake_queue(trans, txq);
}
+
+ spin_unlock(&txq->lock);
return 0;
}
-static void iwl_trans_pcie_free(struct iwl_trans *trans)
+static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
- iwl_calib_free_results(trans);
- iwl_trans_pcie_tx_free(trans);
- iwl_trans_pcie_rx_free(trans);
- free_irq(bus(trans)->irq, trans);
- iwl_free_isr_ict(trans);
- trans->shrd->trans = NULL;
- kfree(trans);
+ writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
-#ifdef CONFIG_PM_SLEEP
-static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
+static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
- /*
- * This function is called when system goes into suspend state
- * mac80211 will call iwlagn_mac_stop() from the mac80211 suspend
- * function first but since iwlagn_mac_stop() has no knowledge of
- * who the caller is,
- * it will not call apm_ops.stop() to stop the DMA operation.
- * Calling apm_ops.stop here to make sure we stop the DMA.
- *
- * But of course ... if we have configured WoWLAN then we did other
- * things already :-)
- */
- if (!trans->shrd->wowlan) {
- iwl_apm_stop(priv(trans));
- } else {
- iwl_disable_interrupts(trans);
- iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- }
-
- return 0;
+ writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
-static int iwl_trans_pcie_resume(struct iwl_trans *trans)
+static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
- bool hw_rfkill = false;
-
- iwl_enable_interrupts(trans);
+ return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
- if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rfkill = true;
+static void iwl_trans_pcie_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (hw_rfkill)
- set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+ trans_pcie->cmd_queue = trans_cfg->cmd_queue;
+ if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
+ trans_pcie->n_no_reclaim_cmds = 0;
else
- clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
-
- iwl_set_hw_rfkill_state(priv(trans), hw_rfkill);
-
- return 0;
+ trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
+ if (trans_pcie->n_no_reclaim_cmds)
+ memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
+ trans_pcie->n_no_reclaim_cmds * sizeof(u8));
}
-#endif /* CONFIG_PM_SLEEP */
-static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx,
- const char *msg)
+static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
- u8 ac, txq_id;
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- for (ac = 0; ac < AC_NUM; ac++) {
- txq_id = trans_pcie->ac_to_queue[ctx][ac];
- IWL_DEBUG_TX_QUEUES(trans, "Queue Status: Q[%d] %s\n",
- ac,
- (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
- ? "stopped" : "awake");
- iwl_wake_queue(trans, &trans_pcie->txq[txq_id], msg);
+ iwl_trans_pcie_tx_free(trans);
+#ifndef CONFIG_IWLWIFI_IDI
+ iwl_trans_pcie_rx_free(trans);
+#endif
+ if (trans_pcie->irq_requested == true) {
+ free_irq(trans_pcie->irq, trans);
+ iwl_free_isr_ict(trans);
}
-}
-const struct iwl_trans_ops trans_ops_pcie;
+ pci_disable_msi(trans_pcie->pci_dev);
+ iounmap(trans_pcie->hw_base);
+ pci_release_regions(trans_pcie->pci_dev);
+ pci_disable_device(trans_pcie->pci_dev);
-static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
-{
- struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
- sizeof(struct iwl_trans_pcie),
- GFP_KERNEL);
- if (iwl_trans) {
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
- iwl_trans->ops = &trans_ops_pcie;
- iwl_trans->shrd = shrd;
- trans_pcie->trans = iwl_trans;
- spin_lock_init(&iwl_trans->hcmd_lock);
- }
+ trans->shrd->trans = NULL;
+ kfree(trans);
+}
- return iwl_trans;
+#ifdef CONFIG_PM_SLEEP
+static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
+{
+ return 0;
}
-static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id,
- const char *msg)
+static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool hw_rfkill;
+
+ hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+
+ if (hw_rfkill)
+ iwl_enable_rfkill_int(trans);
+ else
+ iwl_enable_interrupts(trans);
- iwl_stop_queue(trans, &trans_pcie->txq[txq_id], msg);
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+
+ return 0;
}
+#endif /* CONFIG_PM_SLEEP */
#define IWL_FLUSH_WAIT_MS 2000
@@ -1403,8 +1696,8 @@ static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
int ret = 0;
/* waiting for all the tx frames complete might take a while */
- for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
- if (cnt == trans->shrd->cmd_queue)
+ for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
+ if (cnt == trans_pcie->cmd_queue)
continue;
txq = &trans_pcie->txq[cnt];
q = &txq->q;
@@ -1446,9 +1739,9 @@ static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
q->read_ptr, q->write_ptr);
IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
- iwl_read_prph(bus(trans), SCD_QUEUE_RDPTR(cnt))
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt))
& (TFD_QUEUE_SIZE_MAX - 1),
- iwl_read_prph(bus(trans), SCD_QUEUE_WRPTR(cnt)));
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
return 1;
}
@@ -1502,7 +1795,7 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
pos += scnprintf(*buf + pos, bufsz - pos,
" %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]),
- iwl_read_direct32(bus(trans), fh_tbl[i]));
+ iwl_read_direct32(trans, fh_tbl[i]));
}
return pos;
}
@@ -1511,7 +1804,7 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
IWL_ERR(trans, " %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]),
- iwl_read_direct32(bus(trans), fh_tbl[i]));
+ iwl_read_direct32(trans, fh_tbl[i]));
}
return 0;
}
@@ -1581,7 +1874,7 @@ void iwl_dump_csr(struct iwl_trans *trans)
for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
IWL_ERR(trans, " %25s: 0X%08x\n",
get_csr_string(csr_tbl[i]),
- iwl_read32(bus(trans), csr_tbl[i]));
+ iwl_read32(trans, csr_tbl[i]));
}
}
@@ -1649,7 +1942,9 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
int pos = 0;
int cnt;
int ret;
- const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num;
+ size_t bufsz;
+
+ bufsz = sizeof(char) * 64 * cfg(trans)->base_params->num_of_queues;
if (!trans_pcie->txq) {
IWL_ERR(trans, "txq not ready\n");
@@ -1659,7 +1954,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
if (!buf)
return -ENOMEM;
- for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
+ for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
txq = &trans_pcie->txq[cnt];
q = &txq->q;
pos += scnprintf(buf + pos, bufsz - pos,
@@ -1902,14 +2197,13 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
#endif /*CONFIG_IWLWIFI_DEBUGFS */
const struct iwl_trans_ops trans_ops_pcie = {
- .alloc = iwl_trans_pcie_alloc,
- .request_irq = iwl_trans_pcie_request_irq,
- .start_device = iwl_trans_pcie_start_device,
- .prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
+ .start_hw = iwl_trans_pcie_start_hw,
+ .stop_hw = iwl_trans_pcie_stop_hw,
+ .fw_alive = iwl_trans_pcie_fw_alive,
+ .start_fw = iwl_trans_pcie_start_fw,
.stop_device = iwl_trans_pcie_stop_device,
- .tx_start = iwl_trans_pcie_tx_start,
- .wake_any_queue = iwl_trans_pcie_wake_any_queue,
+ .wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
.send_cmd = iwl_trans_pcie_send_cmd,
@@ -1920,10 +2214,7 @@ const struct iwl_trans_ops trans_ops_pcie = {
.tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
- .kick_nic = iwl_trans_pcie_kick_nic,
-
.free = iwl_trans_pcie_free,
- .stop_queue = iwl_trans_pcie_stop_queue,
.dbgfs_register = iwl_trans_pcie_dbgfs_register,
@@ -1934,4 +2225,121 @@ const struct iwl_trans_ops trans_ops_pcie = {
.suspend = iwl_trans_pcie_suspend,
.resume = iwl_trans_pcie_resume,
#endif
+ .write8 = iwl_trans_pcie_write8,
+ .write32 = iwl_trans_pcie_write32,
+ .read32 = iwl_trans_pcie_read32,
+ .configure = iwl_trans_pcie_configure,
};
+
+struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
+ struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans *trans;
+ u16 pci_cmd;
+ int err;
+
+ trans = kzalloc(sizeof(struct iwl_trans) +
+ sizeof(struct iwl_trans_pcie), GFP_KERNEL);
+
+ if (WARN_ON(!trans))
+ return NULL;
+
+ trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ trans->ops = &trans_ops_pcie;
+ trans->shrd = shrd;
+ trans_pcie->trans = trans;
+ spin_lock_init(&trans_pcie->irq_lock);
+ init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+
+ /* W/A - seems to solve weird behavior. We need to remove this if we
+ * don't want to stay in L1 all the time. This wastes a lot of power */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ if (pci_enable_device(pdev)) {
+ err = -ENODEV;
+ goto out_no_pci;
+ }
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ /* both attempts failed: */
+ if (err) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "No suitable DMA available.\n");
+ goto out_pci_disable_device;
+ }
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed");
+ goto out_pci_disable_device;
+ }
+
+ trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
+ if (!trans_pcie->hw_base) {
+ dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed");
+ err = -ENODEV;
+ goto out_pci_release_regions;
+ }
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "pci_resource_len = 0x%08llx\n",
+ (unsigned long long) pci_resource_len(pdev, 0));
+ dev_printk(KERN_INFO, &pdev->dev,
+ "pci_resource_base = %p\n", trans_pcie->hw_base);
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "HW Revision ID = 0x%X\n", pdev->revision);
+
+ /* We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ err = pci_enable_msi(pdev);
+ if (err)
+ dev_printk(KERN_ERR, &pdev->dev,
+ "pci_enable_msi failed(0X%x)", err);
+
+ trans->dev = &pdev->dev;
+ trans_pcie->irq = pdev->irq;
+ trans_pcie->pci_dev = pdev;
+ trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
+ trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
+ snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
+ "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
+
+ /* TODO: Move this away, not needed if not MSI */
+ /* enable rfkill interrupt: hw bug w/a */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+ }
+
+ /* Initialize the wait queue for commands */
+ init_waitqueue_head(&trans->wait_command_queue);
+
+ return trans;
+
+out_pci_release_regions:
+ pci_release_regions(pdev);
+out_pci_disable_device:
+ pci_disable_device(pdev);
+out_no_pci:
+ kfree(trans);
+ return NULL;
+}
+
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index e6bf3f554772..0c81cbaa8088 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,23 +63,132 @@
#ifndef __iwl_trans_h__
#define __iwl_trans_h__
-#include <linux/debugfs.h>
-#include <linux/skbuff.h>
+#include <linux/ieee80211.h>
+#include <linux/mm.h> /* for page_address */
#include "iwl-shared.h"
-#include "iwl-commands.h"
+#include "iwl-debug.h"
- /*This file includes the declaration that are exported from the transport
- * layer */
+/**
+ * DOC: Transport layer - what is it?
+ *
+ * The transport layer is the layer that deals with the HW directly. It provides
+ * an abstraction of the underlying HW to the upper layer. The transport layer
+ * doesn't provide any policy, algorithm or anything of this kind, but only
+ * mechanisms to make the HW do something. It is not completely stateless but
+ * close to it.
+ * We will have an implementation for each different supported bus.
+ */
+
+/**
+ * DOC: Life cycle of the transport layer
+ *
+ * The transport layer has a very precise life cycle.
+ *
+ * 1) A helper function is called during the module initialization and
+ * registers the bus driver's ops with the transport's alloc function.
+ * 2) Bus's probe calls the transport layer's allocation function.
+ * Of course this function is bus specific.
+ * 3) This allocation function will spawn the upper layer which will
+ * register mac80211.
+ *
+ * 4) At some point (i.e. mac80211's start call), the op_mode will call
+ * the following sequence:
+ * start_hw
+ * start_fw
+ *
+ * 5) Then when finished (or reset):
+ * stop_fw (a.k.a. stop device for the moment)
+ * stop_hw
+ *
+ * 6) Eventually, the free function will be called.
+ */
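+
+/*
+ * A minimal sketch of a caller following this life cycle (illustrative
+ * only; error handling is elided and the iwl_trans_* wrappers used here
+ * are the inlines declared later in this header):
+ *
+ *	ret = iwl_trans_start_hw(trans);
+ *	if (!ret)
+ *		ret = iwl_trans_start_fw(trans, fw);
+ *	... once the fw alive notification arrives ...
+ *	iwl_trans_fw_alive(trans);
+ *	... and when finished (or on reset) ...
+ *	iwl_trans_stop_device(trans);
+ *	iwl_trans_stop_hw(trans);
+ *	iwl_trans_free(trans);
+ */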
struct iwl_priv;
struct iwl_shared;
+struct iwl_op_mode;
+struct fw_img;
+struct sk_buff;
+struct dentry;
+/**
+ * DOC: Host command section
+ *
+ * A host command is a command issued by the upper layer to the fw. There are
+ * several versions of fw that have several APIs. The transport layer is
+ * completely agnostic to these differences.
+ * The transport does provide helper functionality (i.e. SYNC / ASYNC mode).
+ */
#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
+#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
+#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
+#define SEQ_TO_INDEX(s) ((s) & 0xff)
+#define INDEX_TO_SEQ(i) ((i) & 0xff)
+#define SEQ_RX_FRAME cpu_to_le16(0x8000)
+
+/**
+ * struct iwl_cmd_header
+ *
+ * This header format appears in the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ */
+struct iwl_cmd_header {
+ u8 cmd; /* Command ID: REPLY_RXON, etc. */
+ u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
+ /*
+ * The driver sets up the sequence number to values of its choosing.
+ * uCode does not use this value, but passes it back to the driver
+ * when sending the response to each driver-originated command, so
+ * the driver can match the response to the command. Since the values
+ * don't get used by uCode, the driver may set up an arbitrary format.
+ *
+ * There is one exception: uCode sets bit 15 when it originates
+ * the response/notification, i.e. when the response/notification
+ * is not a direct response to a command sent by the driver. For
+ * example, uCode issues REPLY_RX when it sends a received frame
+ * to the driver; it is not a direct response to any driver command.
+ *
+ * The Linux driver uses the following format:
+ *
+ * 0:7 tfd index - position within TX queue
+ * 8:12 TX queue id
+ * 13:14 reserved
+ * 15 unsolicited RX or uCode-originated notification
+ */
+ __le16 sequence;
+} __packed;
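+
+/*
+ * For illustration only (txq_id and write_ptr are placeholder values):
+ * with the driver format described above, the sequence field can be
+ * composed from the helpers defined earlier in this header, e.g.
+ *
+ *	hdr->sequence = cpu_to_le16(QUEUE_TO_SEQ(txq_id) |
+ *				    INDEX_TO_SEQ(write_ptr));
+ */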
+
-enum {
+#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
+
+struct iwl_rx_packet {
+ /*
+ * The first 4 bytes of the RX frame header contain both the RX frame
+ * size and some flags.
+ * Bit fields:
+ * 31: flag flush RB request
+ * 30: flag ignore TC (terminal counter) request
+ * 29: flag fast IRQ request
+ * 28-14: Reserved
+ * 13-00: RX frame size
+ */
+ __le32 len_n_flags;
+ struct iwl_cmd_header hdr;
+ u8 data[];
+} __packed;
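+
+/*
+ * Illustrative sketch: the RX frame size can be recovered from
+ * len_n_flags with the mask defined above, e.g.
+ *
+ *	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ */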
+
+/**
+ * enum CMD_MODE - how to send the host commands?
+ *
+ * @CMD_SYNC: The caller will be stalled until the fw responds to the command
+ * @CMD_ASYNC: Return right away and don't wait for the response
+ * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
+ * response.
+ * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
+ */
+enum CMD_MODE {
CMD_SYNC = 0,
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
@@ -104,25 +213,38 @@ struct iwl_device_cmd {
#define IWL_MAX_CMD_TFDS 2
+/**
+ * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
+ *
+ * IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
+ * ring. The transport layer doesn't map the command's buffer to DMA, but
+ * rather copies it to a previously allocated DMA buffer. This flag tells
+ * the transport layer not to copy the command, but to map the existing
+ * buffer. This can save the memcpy and is worthwhile for very big commands.
+ */
enum iwl_hcmd_dataflag {
IWL_HCMD_DFL_NOCOPY = BIT(0),
};
/**
* struct iwl_host_cmd - Host command to the uCode
+ *
* @data: array of chunks that composes the data of the host command
- * @reply_page: pointer to the page that holds the response to the host command
+ * @resp_pkt: response packet, if %CMD_WANT_SKB was set
+ * @_rx_page_order: (internally used to free response packet)
+ * @_rx_page_addr: (internally used to free response packet)
* @handler_status: return value of the handler of the command
* (put in setup_rx_handlers) - valid for SYNC mode only
- * @callback:
- * @flags: can be CMD_* note CMD_WANT_SKB is incompatible withe CMD_ASYNC
+ * @flags: can be CMD_*
+ * @len: array of the lengths of the chunks in data
- * @dataflags:
+ * @dataflags: IWL_HCMD_DFL_*
* @id: id of the host command
*/
struct iwl_host_cmd {
const void *data[IWL_MAX_CMD_TFDS];
- unsigned long reply_page;
+ struct iwl_rx_packet *resp_pkt;
+ unsigned long _rx_page_addr;
+ u32 _rx_page_order;
int handler_status;
u32 flags;
@@ -131,48 +253,109 @@ struct iwl_host_cmd {
u8 id;
};
+static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
+{
+ free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
+}
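+
+/*
+ * An illustrative usage sketch (the command id below is only an example;
+ * callers pick a real REPLY_* id and fill data/len as needed):
+ *
+ *	struct iwl_host_cmd cmd = {
+ *		.id = REPLY_ECHO,
+ *		.flags = CMD_SYNC | CMD_WANT_SKB,
+ *	};
+ *
+ *	ret = iwl_trans_send_cmd(trans, &cmd);
+ *	if (!ret) {
+ *		... inspect cmd.resp_pkt ...
+ *		iwl_free_resp(&cmd);
+ *	}
+ */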
+
+struct iwl_rx_cmd_buffer {
+ struct page *_page;
+};
+
+static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
+{
+ return page_address(r->_page);
+}
+
+static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
+{
+ struct page *p = r->_page;
+ r->_page = NULL;
+ return p;
+}
+
+#define MAX_NO_RECLAIM_CMDS 6
+
+/**
+ * struct iwl_trans_config - transport configuration
+ *
+ * @op_mode: pointer to the upper layer.
+ * Must be set before any other call.
+ * @cmd_queue: the index of the command queue.
+ * Must be set before start_fw.
+ * @no_reclaim_cmds: Some devices erroneously don't set the
+ * SEQ_RX_FRAME bit on some notifications, this is the
+ * list of such notifications to filter. Max length is
+ * %MAX_NO_RECLAIM_CMDS.
+ * @n_no_reclaim_cmds: # of commands in list
+ */
+struct iwl_trans_config {
+ struct iwl_op_mode *op_mode;
+ u8 cmd_queue;
+ const u8 *no_reclaim_cmds;
+ int n_no_reclaim_cmds;
+};
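+
+/*
+ * Illustrative only (the values below are made up for the example):
+ *
+ *	struct iwl_trans_config trans_cfg = {
+ *		.op_mode = op_mode,
+ *		.cmd_queue = 4,
+ *		.no_reclaim_cmds = no_reclaim_cmds,
+ *		.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds),
+ *	};
+ *	iwl_trans_configure(trans, &trans_cfg);
+ */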
+
/**
* struct iwl_trans_ops - transport specific operations
- * @alloc: allocates the meta data (not the queues themselves)
- * @request_irq: requests IRQ - will be called before the FW load in probe flow
- * @start_device: allocates and inits all the resources for the transport
- * layer.
- * @prepare_card_hw: claim the ownership on the HW. Will be called during
- * probe.
- * @tx_start: starts and configures all the Tx fifo - usually done once the fw
- * is alive.
- * @wake_any_queue: wake all the queues of a specfic context IWL_RXON_CTX_*
+ *
+ * All the handlers MUST be implemented
+ *
+ * @start_hw: starts the HW - from that point on, the HW can send interrupts
+ * May sleep
+ * @stop_hw: stops the HW - from that point on, the HW will be in low power but
+ * will still issue an interrupt if the HW RF kill is triggered.
+ * May sleep
+ * @start_fw: allocates and inits all the resources for the transport
+ * layer. Also kicks the fw image.
+ * May sleep
+ * @fw_alive: called when the fw sends alive notification
+ * May sleep
* @stop_device:stops the whole device (embedded CPU put to reset)
+ * May sleep
+ * @wowlan_suspend: put the device into the correct mode for WoWLAN during
+ * suspend. This is optional; if not implemented, WoWLAN will not be
+ * supported. This callback may sleep.
* @send_cmd:send a host command
+ * May sleep only if CMD_SYNC is set
* @tx: send an skb
+ * Must be atomic
* @reclaim: free packet until ssn. Returns a list of freed packets.
+ * Must be atomic
* @tx_agg_alloc: allocate resources for a TX BA session
+ * Must be atomic
* @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
- * ready and a successful ADDBA response has been received.
+ * ready and a successful ADDBA response has been received.
+ * May sleep
* @tx_agg_disable: de-configure a Tx queue to send AMPDUs
- * @kick_nic: remove the RESET from the embedded CPU and let it run
+ * Must be atomic
* @free: release all the resources for the transport layer itself such as
- * irq, tasklet etc...
- * @stop_queue: stop a specific queue
+ * irq, tasklet etc... From this point on, the device may not issue
+ * any interrupt (incl. RFKILL).
+ * May sleep
* @check_stuck_queue: check if a specific queue is stuck
* @wait_tx_queue_empty: wait until all tx queues are empty
+ * May sleep
* @dbgfs_register: add the dbgfs files under this directory. Files will be
* automatically deleted.
* @suspend: stop the device unless WoWLAN is configured
* @resume: resume activity of the device
+ * @write8: write a u8 to a register at offset ofs from the BAR
+ * @write32: write a u32 to a register at offset ofs from the BAR
+ * @read32: read a u32 register at offset ofs from the BAR
+ * @configure: configure parameters required by the transport layer from
+ * the op_mode. May be called several times before start_fw, can't be
+ * called after that.
*/
struct iwl_trans_ops {
- struct iwl_trans *(*alloc)(struct iwl_shared *shrd);
- int (*request_irq)(struct iwl_trans *iwl_trans);
- int (*start_device)(struct iwl_trans *trans);
- int (*prepare_card_hw)(struct iwl_trans *trans);
+ int (*start_hw)(struct iwl_trans *iwl_trans);
+ void (*stop_hw)(struct iwl_trans *iwl_trans);
+ int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw);
+ void (*fw_alive)(struct iwl_trans *trans);
void (*stop_device)(struct iwl_trans *trans);
- void (*tx_start)(struct iwl_trans *trans);
- void (*wake_any_queue)(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx,
- const char *msg);
+ void (*wowlan_suspend)(struct iwl_trans *trans);
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
@@ -180,8 +363,7 @@ struct iwl_trans_ops {
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
u8 sta_id, u8 tid);
int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
- int txq_id, int ssn, u32 status,
- struct sk_buff_head *skbs);
+ int txq_id, int ssn, struct sk_buff_head *skbs);
int (*tx_agg_disable)(struct iwl_trans *trans,
int sta_id, int tid);
@@ -191,12 +373,8 @@ struct iwl_trans_ops {
enum iwl_rxon_context_id ctx, int sta_id, int tid,
int frame_limit, u16 ssn);
- void (*kick_nic)(struct iwl_trans *trans);
-
void (*free)(struct iwl_trans *trans);
- void (*stop_queue)(struct iwl_trans *trans, int q, const char *msg);
-
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
int (*check_stuck_queue)(struct iwl_trans *trans, int q);
int (*wait_tx_queue_empty)(struct iwl_trans *trans);
@@ -204,127 +382,165 @@ struct iwl_trans_ops {
int (*suspend)(struct iwl_trans *trans);
int (*resume)(struct iwl_trans *trans);
#endif
+ void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
+ void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
+ u32 (*read32)(struct iwl_trans *trans, u32 ofs);
+ void (*configure)(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg);
};
-/* one for each uCode image (inst/data, boot/init/runtime) */
-struct fw_desc {
- dma_addr_t p_addr; /* hardware address */
- void *v_addr; /* software address */
- u32 len; /* size in bytes */
-};
-
-struct fw_img {
- struct fw_desc code; /* firmware code image */
- struct fw_desc data; /* firmware data image */
-};
-
-/* Opaque calibration results */
-struct iwl_calib_result {
- struct list_head list;
- size_t cmd_len;
- struct iwl_calib_hdr hdr;
- /* data follows */
+/**
+ * enum iwl_trans_state - state of the transport layer
+ *
+ * @IWL_TRANS_NO_FW: no fw has sent an alive response
+ * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
+ */
+enum iwl_trans_state {
+ IWL_TRANS_NO_FW = 0,
+ IWL_TRANS_FW_ALIVE = 1,
};
/**
* struct iwl_trans - transport common data
+ *
* @ops - pointer to iwl_trans_ops
+ * @op_mode - pointer to the op_mode
* @shrd - pointer to iwl_shared which holds shared data from the upper layer
- * @hcmd_lock: protects HCMD
- * @ucode_write_complete: indicates that the ucode has been copied.
- * @ucode_rt: run time ucode image
- * @ucode_init: init ucode image
- * @ucode_wowlan: wake on wireless ucode image (optional)
+ * @reg_lock - protect hw register access
+ * @dev - pointer to struct device * that represents the device
+ * @hw_id: a u32 with the ID of the device / subdevice.
+ * Set during transport allocation.
+ * @hw_id_str: a string with info about HW ID. Set during transport allocation.
* @nvm_device_type: indicates OTP or eeprom
- * @calib_results: list head for init calibration results
+ * @pm_support: set to true in start_hw if link pm is supported
+ * @wait_command_queue: the wait_queue for SYNC host commands
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
+ struct iwl_op_mode *op_mode;
struct iwl_shared *shrd;
- spinlock_t hcmd_lock;
+ enum iwl_trans_state state;
+ spinlock_t reg_lock;
- u8 ucode_write_complete; /* the image write is complete */
- struct fw_img ucode_rt;
- struct fw_img ucode_init;
- struct fw_img ucode_wowlan;
+ struct device *dev;
+ u32 hw_rev;
+ u32 hw_id;
+ char hw_id_str[52];
- /* eeprom related variables */
int nvm_device_type;
+ bool pm_support;
- /* init calibration results */
- struct list_head calib_results;
+ wait_queue_head_t wait_command_queue;
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
- char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
+ char trans_specific[0] __aligned(sizeof(void *));
};
-static inline int iwl_trans_request_irq(struct iwl_trans *trans)
+static inline void iwl_trans_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg)
{
- return trans->ops->request_irq(trans);
+ /*
+ * only set the op_mode for the moment. Later on, this function will do
+ * more
+ */
+ trans->op_mode = trans_cfg->op_mode;
+
+ trans->ops->configure(trans, trans_cfg);
}
-static inline int iwl_trans_start_device(struct iwl_trans *trans)
+static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
- return trans->ops->start_device(trans);
+ might_sleep();
+
+ return trans->ops->start_hw(trans);
}
-static inline int iwl_trans_prepare_card_hw(struct iwl_trans *trans)
+static inline void iwl_trans_stop_hw(struct iwl_trans *trans)
{
- return trans->ops->prepare_card_hw(trans);
+ might_sleep();
+
+ trans->ops->stop_hw(trans);
+
+ trans->state = IWL_TRANS_NO_FW;
}
-static inline void iwl_trans_stop_device(struct iwl_trans *trans)
+static inline void iwl_trans_fw_alive(struct iwl_trans *trans)
{
- trans->ops->stop_device(trans);
+ might_sleep();
+
+ trans->ops->fw_alive(trans);
+
+ trans->state = IWL_TRANS_FW_ALIVE;
}
-static inline void iwl_trans_tx_start(struct iwl_trans *trans)
+static inline int iwl_trans_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw)
{
- trans->ops->tx_start(trans);
+ might_sleep();
+
+ return trans->ops->start_fw(trans, fw);
}
-static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx,
- const char *msg)
+static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
- trans->ops->wake_any_queue(trans, ctx, msg);
+ might_sleep();
+
+ trans->ops->stop_device(trans);
+
+ trans->state = IWL_TRANS_NO_FW;
}
+static inline void iwl_trans_wowlan_suspend(struct iwl_trans *trans)
+{
+ might_sleep();
+ trans->ops->wowlan_suspend(trans);
+}
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
+ WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "%s bad state = %d", __func__, trans->state);
+
return trans->ops->send_cmd(trans, cmd);
}
-int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
- u32 flags, u16 len, const void *data);
-
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
u8 sta_id, u8 tid)
{
+ if (trans->state != IWL_TRANS_FW_ALIVE)
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+
return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id, tid);
}
static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
- int tid, int txq_id, int ssn, u32 status,
+ int tid, int txq_id, int ssn,
struct sk_buff_head *skbs)
{
- return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn,
- status, skbs);
+ WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "%s bad state = %d", __func__, trans->state);
+
+ return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, skbs);
}
static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
int sta_id, int tid)
{
+ WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "%s bad state = %d", __func__, trans->state);
+
return trans->ops->tx_agg_disable(trans, sta_id, tid);
}
static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
int sta_id, int tid)
{
+ WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "%s bad state = %d", __func__, trans->state);
+
return trans->ops->tx_agg_alloc(trans, sta_id, tid);
}
@@ -334,12 +550,12 @@ static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
int sta_id, int tid,
int frame_limit, u16 ssn)
{
- trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn);
-}
+ might_sleep();
-static inline void iwl_trans_kick_nic(struct iwl_trans *trans)
-{
- trans->ops->kick_nic(trans);
+ WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "%s bad state = %d", __func__, trans->state);
+
+ trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn);
}
static inline void iwl_trans_free(struct iwl_trans *trans)
@@ -347,19 +563,19 @@ static inline void iwl_trans_free(struct iwl_trans *trans)
trans->ops->free(trans);
}
-static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q,
- const char *msg)
-{
- trans->ops->stop_queue(trans, q, msg);
-}
-
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
{
+ WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "%s bad state = %d", __func__, trans->state);
+
return trans->ops->wait_tx_queue_empty(trans);
}
static inline int iwl_trans_check_stuck_queue(struct iwl_trans *trans, int q)
{
+ WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "%s bad state = %d", __func__, trans->state);
+
return trans->ops->check_stuck_queue(trans, q);
}
static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
@@ -380,18 +596,35 @@ static inline int iwl_trans_resume(struct iwl_trans *trans)
}
#endif
-/*****************************************************
-* Transport layers implementations
-******************************************************/
-extern const struct iwl_trans_ops trans_ops_pcie;
+static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
+{
+ trans->ops->write8(trans, ofs, val);
+}
-int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
- const void *data, size_t len);
-void iwl_dealloc_ucode(struct iwl_trans *trans);
+static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+ trans->ops->write32(trans, ofs, val);
+}
-int iwl_send_calib_results(struct iwl_trans *trans);
-int iwl_calib_set(struct iwl_trans *trans,
- const struct iwl_calib_hdr *cmd, int len);
-void iwl_calib_free_results(struct iwl_trans *trans);
+static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
+{
+ return trans->ops->read32(trans, ofs);
+}
+/*****************************************************
+* Transport layers implementations + their allocation function
+******************************************************/
+struct pci_dev;
+struct pci_device_id;
+extern const struct iwl_trans_ops trans_ops_pcie;
+struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
+ struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+int __must_check iwl_pci_register_driver(void);
+void iwl_pci_unregister_driver(void);
+
+extern const struct iwl_trans_ops trans_ops_idi;
+struct iwl_trans *iwl_trans_idi_alloc(struct iwl_shared *shrd,
+ void *pdev_void,
+ const void *ent_void);
#endif /* __iwl_trans_h__ */
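The hunks above give the iwl_trans inline wrappers a common guard: each one checks that the transport has reached the firmware-alive state before forwarding to the ops vtable, warning once otherwise (only iwl_trans_tx uses a plain error print, since it sits on the hot path). A minimal userspace C sketch of that wrapper-plus-state-guard shape, with hypothetical names rather than the driver's API:

#include <stdio.h>

enum trans_state { TRANS_NO_FW, TRANS_FW_STARTED, TRANS_FW_ALIVE };

struct trans_ops { int (*tx)(void *ctx, const void *frame, int len); };

struct trans {
        enum trans_state state;
        const struct trans_ops *ops;
        void *ctx;
};

/* warn at most once per call site, roughly like WARN_ONCE() */
#define warn_once(cond, fmt, ...) do {                          \
        static int warned;                                      \
        if ((cond) && !warned) {                                \
                warned = 1;                                     \
                fprintf(stderr, fmt "\n", ##__VA_ARGS__);       \
        }                                                       \
} while (0)

static inline int trans_tx(struct trans *t, const void *frame, int len)
{
        /* the op is only meaningful once the firmware reported alive */
        warn_once(t->state != TRANS_FW_ALIVE,
                  "%s: bad state = %d", __func__, t->state);
        return t->ops->tx(t->ctx, frame, len);
}

As in the diff, the check is deliberately non-fatal: the call still reaches the backend, and the warning only flags callers that run too early in the bring-up sequence.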
diff --git a/drivers/net/wireless/iwlwifi/iwl-ucode.c b/drivers/net/wireless/iwlwifi/iwl-ucode.c
index 36a1b5b25858..252828728837 100644
--- a/drivers/net/wireless/iwlwifi/iwl-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-ucode.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,12 +28,8 @@
*****************************************************************************/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/dma-mapping.h>
-#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
@@ -42,6 +38,7 @@
#include "iwl-agn-calib.h"
#include "iwl-trans.h"
#include "iwl-fh.h"
+#include "iwl-op-mode.h"
static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
@@ -80,157 +77,35 @@ static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
*
******************************************************************************/
-static void iwl_free_fw_desc(struct iwl_bus *bus, struct fw_desc *desc)
+static inline const struct fw_img *
+iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type)
{
- if (desc->v_addr)
- dma_free_coherent(bus->dev, desc->len,
- desc->v_addr, desc->p_addr);
- desc->v_addr = NULL;
- desc->len = 0;
-}
-
-static void iwl_free_fw_img(struct iwl_bus *bus, struct fw_img *img)
-{
- iwl_free_fw_desc(bus, &img->code);
- iwl_free_fw_desc(bus, &img->data);
-}
-
-void iwl_dealloc_ucode(struct iwl_trans *trans)
-{
- iwl_free_fw_img(bus(trans), &trans->ucode_rt);
- iwl_free_fw_img(bus(trans), &trans->ucode_init);
- iwl_free_fw_img(bus(trans), &trans->ucode_wowlan);
-}
-
-int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
- const void *data, size_t len)
-{
- if (!len) {
- desc->v_addr = NULL;
- return -EINVAL;
- }
-
- desc->v_addr = dma_alloc_coherent(bus->dev, len,
- &desc->p_addr, GFP_KERNEL);
- if (!desc->v_addr)
- return -ENOMEM;
+ if (ucode_type >= IWL_UCODE_TYPE_MAX)
+ return NULL;
- desc->len = len;
- memcpy(desc->v_addr, data, len);
- return 0;
-}
-
-/*
- * ucode
- */
-static int iwl_load_section(struct iwl_trans *trans, const char *name,
- struct fw_desc *image, u32 dst_addr)
-{
- struct iwl_bus *bus = bus(trans);
- dma_addr_t phy_addr = image->p_addr;
- u32 byte_cnt = image->len;
- int ret;
-
- trans->ucode_write_complete = 0;
-
- iwl_write_direct32(bus,
- FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
-
- iwl_write_direct32(bus,
- FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
-
- iwl_write_direct32(bus,
- FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
- phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
-
- iwl_write_direct32(bus,
- FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
- (iwl_get_dma_hi_addr(phy_addr)
- << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
-
- iwl_write_direct32(bus,
- FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
- 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
- 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
- FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
-
- iwl_write_direct32(bus,
- FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
-
- IWL_DEBUG_FW(bus, "%s uCode section being loaded...\n", name);
- ret = wait_event_timeout(trans->shrd->wait_command_queue,
- trans->ucode_write_complete, 5 * HZ);
- if (!ret) {
- IWL_ERR(trans, "Could not load the %s uCode section\n",
- name);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static inline struct fw_img *iwl_get_ucode_image(struct iwl_trans *trans,
- enum iwl_ucode_type ucode_type)
-{
- switch (ucode_type) {
- case IWL_UCODE_INIT:
- return &trans->ucode_init;
- case IWL_UCODE_WOWLAN:
- return &trans->ucode_wowlan;
- case IWL_UCODE_REGULAR:
- return &trans->ucode_rt;
- case IWL_UCODE_NONE:
- break;
- }
- return NULL;
-}
-
-static int iwl_load_given_ucode(struct iwl_trans *trans,
- enum iwl_ucode_type ucode_type)
-{
- int ret = 0;
- struct fw_img *image = iwl_get_ucode_image(trans, ucode_type);
-
-
- if (!image) {
- IWL_ERR(trans, "Invalid ucode requested (%d)\n",
- ucode_type);
- return -EINVAL;
- }
-
- ret = iwl_load_section(trans, "INST", &image->code,
- IWLAGN_RTC_INST_LOWER_BOUND);
- if (ret)
- return ret;
-
- return iwl_load_section(trans, "DATA", &image->data,
- IWLAGN_RTC_DATA_LOWER_BOUND);
+ return &priv->fw->img[ucode_type];
}
/*
* Calibration
*/
-static int iwl_set_Xtal_calib(struct iwl_trans *trans)
+static int iwl_set_Xtal_calib(struct iwl_priv *priv)
{
struct iwl_calib_xtal_freq_cmd cmd;
__le16 *xtal_calib =
- (__le16 *)iwl_eeprom_query_addr(trans->shrd, EEPROM_XTAL);
+ (__le16 *)iwl_eeprom_query_addr(priv->shrd, EEPROM_XTAL);
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
- return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
+ return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
}
-static int iwl_set_temperature_offset_calib(struct iwl_trans *trans)
+static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
{
struct iwl_calib_temperature_offset_cmd cmd;
__le16 *offset_calib =
- (__le16 *)iwl_eeprom_query_addr(trans->shrd,
+ (__le16 *)iwl_eeprom_query_addr(priv->shrd,
EEPROM_RAW_TEMPERATURE);
memset(&cmd, 0, sizeof(cmd));
@@ -239,48 +114,48 @@ static int iwl_set_temperature_offset_calib(struct iwl_trans *trans)
if (!(cmd.radio_sensor_offset))
cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
- IWL_DEBUG_CALIB(trans, "Radio sensor offset: %d\n",
+ IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
le16_to_cpu(cmd.radio_sensor_offset));
- return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
+ return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
}
-static int iwl_set_temperature_offset_calib_v2(struct iwl_trans *trans)
+static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
{
struct iwl_calib_temperature_offset_v2_cmd cmd;
- __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(trans->shrd,
+ __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv->shrd,
EEPROM_KELVIN_TEMPERATURE);
__le16 *offset_calib_low =
- (__le16 *)iwl_eeprom_query_addr(trans->shrd,
+ (__le16 *)iwl_eeprom_query_addr(priv->shrd,
EEPROM_RAW_TEMPERATURE);
struct iwl_eeprom_calib_hdr *hdr;
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(trans->shrd,
+ hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv->shrd,
EEPROM_CALIB_ALL);
memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
sizeof(*offset_calib_high));
memcpy(&cmd.radio_sensor_offset_low, offset_calib_low,
sizeof(*offset_calib_low));
if (!(cmd.radio_sensor_offset_low)) {
- IWL_DEBUG_CALIB(trans, "no info in EEPROM, use default\n");
+ IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
}
memcpy(&cmd.burntVoltageRef, &hdr->voltage,
sizeof(hdr->voltage));
- IWL_DEBUG_CALIB(trans, "Radio sensor offset high: %d\n",
+ IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
le16_to_cpu(cmd.radio_sensor_offset_high));
- IWL_DEBUG_CALIB(trans, "Radio sensor offset low: %d\n",
+ IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n",
le16_to_cpu(cmd.radio_sensor_offset_low));
- IWL_DEBUG_CALIB(trans, "Voltage Ref: %d\n",
+ IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n",
le16_to_cpu(cmd.burntVoltageRef));
- return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
+ return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
}
-static int iwl_send_calib_cfg(struct iwl_trans *trans)
+static int iwl_send_calib_cfg(struct iwl_priv *priv)
{
struct iwl_calib_cfg_cmd calib_cfg_cmd;
struct iwl_host_cmd cmd = {
@@ -296,47 +171,47 @@ static int iwl_send_calib_cfg(struct iwl_trans *trans)
calib_cfg_cmd.ucd_calib_cfg.flags =
IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
- return iwl_trans_send_cmd(trans, &cmd);
+ return iwl_dvm_send_cmd(priv, &cmd);
}
int iwlagn_rx_calib_result(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
+ struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->data;
int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
/* reduce the size of the length field itself */
len -= 4;
- if (iwl_calib_set(trans(priv), hdr, len))
+ if (iwl_calib_set(priv, hdr, len))
IWL_ERR(priv, "Failed to record calibration data %d\n",
hdr->op_code);
return 0;
}
-int iwl_init_alive_start(struct iwl_trans *trans)
+int iwl_init_alive_start(struct iwl_priv *priv)
{
int ret;
- if (cfg(trans)->bt_params &&
- cfg(trans)->bt_params->advanced_bt_coexist) {
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
/*
* Tell uCode we are ready to perform calibration
* need to perform this before any calibration
* no need to close the envlope since we are going
* to load the runtime uCode later.
*/
- ret = iwl_send_bt_env(trans, IWL_BT_COEX_ENV_OPEN,
+ ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
}
- ret = iwl_send_calib_cfg(trans);
+ ret = iwl_send_calib_cfg(priv);
if (ret)
return ret;
@@ -344,21 +219,21 @@ int iwl_init_alive_start(struct iwl_trans *trans)
* temperature offset calibration is only needed for runtime ucode,
* so prepare the value now.
*/
- if (cfg(trans)->need_temp_offset_calib) {
- if (cfg(trans)->temp_offset_v2)
- return iwl_set_temperature_offset_calib_v2(trans);
+ if (cfg(priv)->need_temp_offset_calib) {
+ if (cfg(priv)->temp_offset_v2)
+ return iwl_set_temperature_offset_calib_v2(priv);
else
- return iwl_set_temperature_offset_calib(trans);
+ return iwl_set_temperature_offset_calib(priv);
}
return 0;
}
-static int iwl_send_wimax_coex(struct iwl_trans *trans)
+static int iwl_send_wimax_coex(struct iwl_priv *priv)
{
struct iwl_wimax_coex_cmd coex_cmd;
- if (cfg(trans)->base_params->support_wimax_coexist) {
+ if (cfg(priv)->base_params->support_wimax_coexist) {
/* UnMask wake up src at associated sleep */
coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
@@ -377,7 +252,7 @@ static int iwl_send_wimax_coex(struct iwl_trans *trans)
/* coexistence is disabled */
memset(&coex_cmd, 0, sizeof(coex_cmd));
}
- return iwl_trans_send_cmd_pdu(trans,
+ return iwl_dvm_send_cmd_pdu(priv,
COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
sizeof(coex_cmd), &coex_cmd);
}
@@ -404,64 +279,54 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
0, 0, 0, 0, 0, 0, 0
};
-void iwl_send_prio_tbl(struct iwl_trans *trans)
+void iwl_send_prio_tbl(struct iwl_priv *priv)
{
struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
sizeof(iwl_bt_prio_tbl));
- if (iwl_trans_send_cmd_pdu(trans,
+ if (iwl_dvm_send_cmd_pdu(priv,
REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
sizeof(prio_tbl_cmd), &prio_tbl_cmd))
- IWL_ERR(trans, "failed to send BT prio tbl command\n");
+ IWL_ERR(priv, "failed to send BT prio tbl command\n");
}
-int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type)
+int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
{
struct iwl_bt_coex_prot_env_cmd env_cmd;
int ret;
env_cmd.action = action;
env_cmd.type = type;
- ret = iwl_trans_send_cmd_pdu(trans,
+ ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
sizeof(env_cmd), &env_cmd);
if (ret)
- IWL_ERR(trans, "failed to send BT env command\n");
+ IWL_ERR(priv, "failed to send BT env command\n");
return ret;
}
-static int iwl_alive_notify(struct iwl_trans *trans)
+static int iwl_alive_notify(struct iwl_priv *priv)
{
- struct iwl_priv *priv = priv(trans);
- struct iwl_rxon_context *ctx;
int ret;
- if (!priv->tx_cmd_pool)
- priv->tx_cmd_pool =
- kmem_cache_create("iwl_dev_cmd",
- sizeof(struct iwl_device_cmd),
- sizeof(void *), 0, NULL);
+ iwl_trans_fw_alive(trans(priv));
- if (!priv->tx_cmd_pool)
- return -ENOMEM;
+ priv->passive_no_rx = false;
+ priv->transport_queue_stop = 0;
- iwl_trans_tx_start(trans);
- for_each_context(priv, ctx)
- ctx->last_tx_rejected = false;
-
- ret = iwl_send_wimax_coex(trans);
+ ret = iwl_send_wimax_coex(priv);
if (ret)
return ret;
if (!cfg(priv)->no_xtal_calib) {
- ret = iwl_set_Xtal_calib(trans);
+ ret = iwl_set_Xtal_calib(priv);
if (ret)
return ret;
}
- return iwl_send_calib_results(trans);
+ return iwl_send_calib_results(priv);
}
@@ -470,23 +335,23 @@ static int iwl_alive_notify(struct iwl_trans *trans)
* using sample data 100 bytes apart. If these sample points are good,
* it's a pretty good bet that everything between them is good, too.
*/
-static int iwl_verify_inst_sparse(struct iwl_bus *bus,
- struct fw_desc *fw_desc)
+static int iwl_verify_sec_sparse(struct iwl_priv *priv,
+ const struct fw_desc *fw_desc)
{
__le32 *image = (__le32 *)fw_desc->v_addr;
u32 len = fw_desc->len;
u32 val;
u32 i;
- IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
+ IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
/* read data comes through single port, auto-incr addr */
/* NOTE: Use the debugless read so we don't flood kernel log
* if IWL_DL_IO is set */
- iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
- i + IWLAGN_RTC_INST_LOWER_BOUND);
- val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
+ iwl_write_direct32(trans(priv), HBUS_TARG_MEM_RADDR,
+ i + fw_desc->offset);
+ val = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image))
return -EIO;
}
@@ -494,8 +359,8 @@ static int iwl_verify_inst_sparse(struct iwl_bus *bus,
return 0;
}
-static void iwl_print_mismatch_inst(struct iwl_bus *bus,
- struct fw_desc *fw_desc)
+static void iwl_print_mismatch_sec(struct iwl_priv *priv,
+ const struct fw_desc *fw_desc)
{
__le32 *image = (__le32 *)fw_desc->v_addr;
u32 len = fw_desc->len;
@@ -503,18 +368,18 @@ static void iwl_print_mismatch_inst(struct iwl_bus *bus,
u32 offs;
int errors = 0;
- IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
+ IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
- iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
- IWLAGN_RTC_INST_LOWER_BOUND);
+ iwl_write_direct32(trans(priv), HBUS_TARG_MEM_RADDR,
+ fw_desc->offset);
for (offs = 0;
offs < len && errors < 20;
offs += sizeof(u32), image++) {
/* read data comes through single port, auto-incr addr */
- val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
+ val = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image)) {
- IWL_ERR(bus, "uCode INST section at "
+ IWL_ERR(priv, "uCode INST section at "
"offset 0x%x, is 0x%x, s/b 0x%x\n",
offs, val, le32_to_cpu(*image));
errors++;
@@ -526,24 +391,24 @@ static void iwl_print_mismatch_inst(struct iwl_bus *bus,
* iwl_verify_ucode - determine which instruction image is in SRAM,
* and verify its contents
*/
-static int iwl_verify_ucode(struct iwl_trans *trans,
+static int iwl_verify_ucode(struct iwl_priv *priv,
enum iwl_ucode_type ucode_type)
{
- struct fw_img *img = iwl_get_ucode_image(trans, ucode_type);
+ const struct fw_img *img = iwl_get_ucode_image(priv, ucode_type);
if (!img) {
- IWL_ERR(trans, "Invalid ucode requested (%d)\n", ucode_type);
+ IWL_ERR(priv, "Invalid ucode requested (%d)\n", ucode_type);
return -EINVAL;
}
- if (!iwl_verify_inst_sparse(bus(trans), &img->code)) {
- IWL_DEBUG_FW(trans, "uCode is good in inst SRAM\n");
+ if (!iwl_verify_sec_sparse(priv, &img->sec[IWL_UCODE_SECTION_INST])) {
+ IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
return 0;
}
- IWL_ERR(trans, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
+ IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
- iwl_print_mismatch_inst(bus(trans), &img->code);
+ iwl_print_mismatch_sec(priv, &img->sec[IWL_UCODE_SECTION_INST]);
return -EIO;
}
@@ -552,137 +417,76 @@ struct iwl_alive_data {
u8 subtype;
};
-static void iwl_alive_fn(struct iwl_trans *trans,
+static void iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt,
void *data)
{
+ struct iwl_priv *priv =
+ container_of(notif_wait, struct iwl_priv, notif_wait);
struct iwl_alive_data *alive_data = data;
struct iwl_alive_resp *palive;
- palive = &pkt->u.alive_frame;
+ palive = (void *)pkt->data;
- IWL_DEBUG_FW(trans, "Alive ucode status 0x%08X revision "
+ IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision "
"0x%01X 0x%01X\n",
palive->is_valid, palive->ver_type,
palive->ver_subtype);
- trans->shrd->device_pointers.error_event_table =
+ priv->shrd->device_pointers.error_event_table =
le32_to_cpu(palive->error_event_table_ptr);
- trans->shrd->device_pointers.log_event_table =
+ priv->shrd->device_pointers.log_event_table =
le32_to_cpu(palive->log_event_table_ptr);
alive_data->subtype = palive->ver_subtype;
alive_data->valid = palive->is_valid == UCODE_VALID_OK;
}
-/* notification wait support */
-void iwl_init_notification_wait(struct iwl_shared *shrd,
- struct iwl_notification_wait *wait_entry,
- u8 cmd,
- void (*fn)(struct iwl_trans *trans,
- struct iwl_rx_packet *pkt,
- void *data),
- void *fn_data)
-{
- wait_entry->fn = fn;
- wait_entry->fn_data = fn_data;
- wait_entry->cmd = cmd;
- wait_entry->triggered = false;
- wait_entry->aborted = false;
-
- spin_lock_bh(&shrd->notif_wait_lock);
- list_add(&wait_entry->list, &shrd->notif_waits);
- spin_unlock_bh(&shrd->notif_wait_lock);
-}
-
-int iwl_wait_notification(struct iwl_shared *shrd,
- struct iwl_notification_wait *wait_entry,
- unsigned long timeout)
-{
- int ret;
-
- ret = wait_event_timeout(shrd->notif_waitq,
- wait_entry->triggered || wait_entry->aborted,
- timeout);
-
- spin_lock_bh(&shrd->notif_wait_lock);
- list_del(&wait_entry->list);
- spin_unlock_bh(&shrd->notif_wait_lock);
-
- if (wait_entry->aborted)
- return -EIO;
-
- /* return value is always >= 0 */
- if (ret <= 0)
- return -ETIMEDOUT;
- return 0;
-}
-
-void iwl_remove_notification(struct iwl_shared *shrd,
- struct iwl_notification_wait *wait_entry)
-{
- spin_lock_bh(&shrd->notif_wait_lock);
- list_del(&wait_entry->list);
- spin_unlock_bh(&shrd->notif_wait_lock);
-}
-
-void iwl_abort_notification_waits(struct iwl_shared *shrd)
-{
- unsigned long flags;
- struct iwl_notification_wait *wait_entry;
-
- spin_lock_irqsave(&shrd->notif_wait_lock, flags);
- list_for_each_entry(wait_entry, &shrd->notif_waits, list)
- wait_entry->aborted = true;
- spin_unlock_irqrestore(&shrd->notif_wait_lock, flags);
-
- wake_up_all(&shrd->notif_waitq);
-}
-
#define UCODE_ALIVE_TIMEOUT HZ
#define UCODE_CALIB_TIMEOUT (2*HZ)
-int iwl_load_ucode_wait_alive(struct iwl_trans *trans,
+int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
enum iwl_ucode_type ucode_type)
{
struct iwl_notification_wait alive_wait;
struct iwl_alive_data alive_data;
+ const struct fw_img *fw;
int ret;
enum iwl_ucode_type old_type;
- ret = iwl_trans_start_device(trans);
- if (ret)
- return ret;
+ old_type = priv->shrd->ucode_type;
+ priv->shrd->ucode_type = ucode_type;
+ fw = iwl_get_ucode_image(priv, ucode_type);
- iwl_init_notification_wait(trans->shrd, &alive_wait, REPLY_ALIVE,
- iwl_alive_fn, &alive_data);
+ priv->ucode_loaded = false;
+
+ if (!fw)
+ return -EINVAL;
- old_type = trans->shrd->ucode_type;
- trans->shrd->ucode_type = ucode_type;
+ iwl_init_notification_wait(&priv->notif_wait, &alive_wait, REPLY_ALIVE,
+ iwl_alive_fn, &alive_data);
- ret = iwl_load_given_ucode(trans, ucode_type);
+ ret = iwl_trans_start_fw(trans(priv), fw);
if (ret) {
- trans->shrd->ucode_type = old_type;
- iwl_remove_notification(trans->shrd, &alive_wait);
+ priv->shrd->ucode_type = old_type;
+ iwl_remove_notification(&priv->notif_wait, &alive_wait);
return ret;
}
- iwl_trans_kick_nic(trans);
-
/*
* Some things may run in the background now, but we
* just wait for the ALIVE notification here.
*/
- ret = iwl_wait_notification(trans->shrd, &alive_wait,
+ ret = iwl_wait_notification(&priv->notif_wait, &alive_wait,
UCODE_ALIVE_TIMEOUT);
if (ret) {
- trans->shrd->ucode_type = old_type;
+ priv->shrd->ucode_type = old_type;
return ret;
}
if (!alive_data.valid) {
- IWL_ERR(trans, "Loaded ucode is not valid!\n");
- trans->shrd->ucode_type = old_type;
+ IWL_ERR(priv, "Loaded ucode is not valid!\n");
+ priv->shrd->ucode_type = old_type;
return -EIO;
}
@@ -692,9 +496,9 @@ int iwl_load_ucode_wait_alive(struct iwl_trans *trans,
* skip it for WoWLAN.
*/
if (ucode_type != IWL_UCODE_WOWLAN) {
- ret = iwl_verify_ucode(trans, ucode_type);
+ ret = iwl_verify_ucode(priv, ucode_type);
if (ret) {
- trans->shrd->ucode_type = old_type;
+ priv->shrd->ucode_type = old_type;
return ret;
}
@@ -702,41 +506,43 @@ int iwl_load_ucode_wait_alive(struct iwl_trans *trans,
msleep(5);
}
- ret = iwl_alive_notify(trans);
+ ret = iwl_alive_notify(priv);
if (ret) {
- IWL_WARN(trans,
+ IWL_WARN(priv,
"Could not complete ALIVE transition: %d\n", ret);
- trans->shrd->ucode_type = old_type;
+ priv->shrd->ucode_type = old_type;
return ret;
}
+ priv->ucode_loaded = true;
+
return 0;
}
-int iwl_run_init_ucode(struct iwl_trans *trans)
+int iwl_run_init_ucode(struct iwl_priv *priv)
{
struct iwl_notification_wait calib_wait;
int ret;
- lockdep_assert_held(&trans->shrd->mutex);
+ lockdep_assert_held(&priv->mutex);
/* No init ucode required? Curious, but maybe ok */
- if (!trans->ucode_init.code.len)
+ if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
return 0;
- if (trans->shrd->ucode_type != IWL_UCODE_NONE)
+ if (priv->init_ucode_run)
return 0;
- iwl_init_notification_wait(trans->shrd, &calib_wait,
+ iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
CALIBRATION_COMPLETE_NOTIFICATION,
NULL, NULL);
/* Will also start the device */
- ret = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT);
+ ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
if (ret)
goto error;
- ret = iwl_init_alive_start(trans);
+ ret = iwl_init_alive_start(priv);
if (ret)
goto error;
@@ -744,15 +550,19 @@ int iwl_run_init_ucode(struct iwl_trans *trans)
* Some things may run in the background now, but we
* just wait for the calibration complete notification.
*/
- ret = iwl_wait_notification(trans->shrd, &calib_wait,
+ ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
UCODE_CALIB_TIMEOUT);
+ if (!ret)
+ priv->init_ucode_run = true;
goto out;
error:
- iwl_remove_notification(trans->shrd, &calib_wait);
+ iwl_remove_notification(&priv->notif_wait, &calib_wait);
out:
/* Whatever happened, stop the device */
- iwl_trans_stop_device(trans);
+ iwl_trans_stop_device(trans(priv));
+ priv->ucode_loaded = false;
+
return ret;
}
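iwl_load_ucode_wait_alive() above follows a register-then-wait pattern: it arms a waiter for REPLY_ALIVE before starting the firmware, waits with a timeout, and removes the waiter on the error path so the notification can never race past it. A rough userspace analogue of the single-waiter part, using pthreads (illustrative only, not the kernel's iwl_notification_wait API, which keeps a list of such waiters):

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct notif_wait {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool triggered;
};

static void notif_wait_init(struct notif_wait *w)
{
        pthread_mutex_init(&w->lock, NULL);
        pthread_cond_init(&w->cond, NULL);
        w->triggered = false;
}

/* called from the RX/notification path when the awaited event arrives */
static void notif_trigger(struct notif_wait *w)
{
        pthread_mutex_lock(&w->lock);
        w->triggered = true;
        pthread_cond_signal(&w->cond);
        pthread_mutex_unlock(&w->lock);
}

/* returns 0 on success, -1 on timeout */
static int notif_wait_timeout(struct notif_wait *w, int timeout_sec)
{
        struct timespec ts;
        int ret = 0;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += timeout_sec;

        pthread_mutex_lock(&w->lock);
        while (!w->triggered && ret == 0)
                ret = pthread_cond_timedwait(&w->cond, &w->lock, &ts);
        pthread_mutex_unlock(&w->lock);

        return w->triggered ? 0 : -1;
}

Arming the waiter before iwl_trans_start_fw() is the important ordering; the error path calls iwl_remove_notification() precisely because a waiter left armed would otherwise dangle on the list.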
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.h b/drivers/net/wireless/iwmc3200wifi/trace.h
index abb4805fa8df..f5f7070b7e22 100644
--- a/drivers/net/wireless/iwmc3200wifi/trace.h
+++ b/drivers/net/wireless/iwmc3200wifi/trace.h
@@ -144,7 +144,7 @@ TRACE_EVENT(iwm_tx_packets,
TP_printk(
IWM_PR_FMT " Tx %spacket: eot %d, seq 0x%x, sta_color 0x%x, "
- "ra_tid 0x%x, credit_group 0x%x, embeded_packets %d, %d bytes",
+ "ra_tid 0x%x, credit_group 0x%x, embedded_packets %d, %d bytes",
IWM_PR_ARG, !__entry->eot ? "concatenated " : "",
__entry->eot, __entry->seq, __entry->color, __entry->ra_tid,
__entry->credit_group, __entry->npkt, __entry->bytes
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index a7cd311cb1b7..3fa1ecebadfd 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1631,42 +1631,6 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
/*
- * "Site survey", here just current channel and noise level
- */
-
-static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev,
- int idx, struct survey_info *survey)
-{
- struct lbs_private *priv = wiphy_priv(wiphy);
- s8 signal, noise;
- int ret;
-
- if (dev == priv->mesh_dev)
- return -EOPNOTSUPP;
-
- if (idx != 0)
- ret = -ENOENT;
-
- lbs_deb_enter(LBS_DEB_CFG80211);
-
- survey->channel = ieee80211_get_channel(wiphy,
- ieee80211_channel_to_frequency(priv->channel,
- IEEE80211_BAND_2GHZ));
-
- ret = lbs_get_rssi(priv, &signal, &noise);
- if (ret == 0) {
- survey->filled = SURVEY_INFO_NOISE_DBM;
- survey->noise = noise;
- }
-
- lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
- return ret;
-}
-
-
-
-
-/*
* Change interface
*/
@@ -2068,7 +2032,6 @@ static struct cfg80211_ops lbs_cfg80211_ops = {
.del_key = lbs_cfg_del_key,
.set_default_key = lbs_cfg_set_default_key,
.get_station = lbs_cfg_get_station,
- .dump_survey = lbs_get_survey,
.change_virtual_intf = lbs_change_intf,
.join_ibss = lbs_join_ibss,
.leave_ibss = lbs_leave_ibss,
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 3f7bf4d912b6..234ee88dec95 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -815,10 +815,9 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
lbs_deb_enter(LBS_DEB_CS);
card = kzalloc(sizeof(struct if_cs_card), GFP_KERNEL);
- if (!card) {
- pr_err("error in kzalloc\n");
+ if (!card)
goto out;
- }
+
card->p_dev = p_dev;
p_dev->priv = card;
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index b5fbbc7947df..74da5f1ea243 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -261,10 +261,8 @@ static int if_usb_probe(struct usb_interface *intf,
udev = interface_to_usbdev(intf);
cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL);
- if (!cardp) {
- pr_err("Out of memory allocating private data\n");
+ if (!cardp)
goto error;
- }
setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp);
init_waitqueue_head(&cardp->fw_wq);
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index aff8b5743af0..7ced130f4f9e 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -153,10 +153,8 @@ static int if_usb_probe(struct usb_interface *intf,
udev = interface_to_usbdev(intf);
cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL);
- if (!cardp) {
- pr_err("Out of memory allocating private data.\n");
+ if (!cardp)
goto error;
- }
setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp);
init_waitqueue_head(&cardp->fw_wq);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4b9e730d2c8a..b7ce6a6e355f 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -27,6 +27,7 @@
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/module.h>
+#include <linux/ktime.h>
#include <net/genetlink.h>
#include "mac80211_hwsim.h"
@@ -321,11 +322,15 @@ struct mac80211_hwsim_data {
struct dentry *debugfs_group;
int power_level;
+
+ /* difference between this hw's clock and the real clock, in usecs */
+ u64 tsf_offset;
};
struct hwsim_radiotap_hdr {
struct ieee80211_radiotap_header hdr;
+ __le64 rt_tsft;
u8 rt_flags;
u8 rt_rate;
__le16 rt_channel;
@@ -367,6 +372,28 @@ static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static __le64 __mac80211_hwsim_get_tsf(struct mac80211_hwsim_data *data)
+{
+ struct timeval tv = ktime_to_timeval(ktime_get_real());
+ u64 now = tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
+ return cpu_to_le64(now + data->tsf_offset);
+}
+
+static u64 mac80211_hwsim_get_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mac80211_hwsim_data *data = hw->priv;
+ return le64_to_cpu(__mac80211_hwsim_get_tsf(data));
+}
+
+static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u64 tsf)
+{
+ struct mac80211_hwsim_data *data = hw->priv;
+ struct timeval tv = ktime_to_timeval(ktime_get_real());
+ u64 now = tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
+ data->tsf_offset = tsf - now;
+}
static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
struct sk_buff *tx_skb)
@@ -391,7 +418,9 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
(1 << IEEE80211_RADIOTAP_RATE) |
+ (1 << IEEE80211_RADIOTAP_TSFT) |
(1 << IEEE80211_RADIOTAP_CHANNEL));
+ hdr->rt_tsft = __mac80211_hwsim_get_tsf(data);
hdr->rt_flags = 0;
hdr->rt_rate = txrate->bitrate / 5;
hdr->rt_channel = cpu_to_le16(data->channel->center_freq);
@@ -592,7 +621,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
return;
nla_put_failure:
- printk(KERN_DEBUG "mac80211_hwsim: error occured in %s\n", __func__);
+ printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
}
static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
@@ -610,7 +639,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
}
memset(&rx_status, 0, sizeof(rx_status));
- /* TODO: set mactime */
+ rx_status.flag |= RX_FLAG_MACTIME_MPDU;
rx_status.freq = data->channel->center_freq;
rx_status.band = data->channel->band;
rx_status.rate_idx = info->control.rates[0].idx;
@@ -654,6 +683,8 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
if (mac80211_hwsim_addr_match(data2, hdr->addr1))
ack = true;
+ rx_status.mactime =
+ le64_to_cpu(__mac80211_hwsim_get_tsf(data2));
memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(data2->hw, nskb);
}
@@ -667,6 +698,12 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
bool ack;
struct ieee80211_tx_info *txi;
u32 _pid;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
+ struct mac80211_hwsim_data *data = hw->priv;
+
+ if (ieee80211_is_beacon(mgmt->frame_control) ||
+ ieee80211_is_probe_resp(mgmt->frame_control))
+ mgmt->u.beacon.timestamp = __mac80211_hwsim_get_tsf(data);
mac80211_hwsim_monitor_rx(hw, skb);
@@ -763,9 +800,11 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
struct ieee80211_vif *vif)
{
struct ieee80211_hw *hw = arg;
+ struct mac80211_hwsim_data *data = hw->priv;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
u32 _pid;
+ struct ieee80211_mgmt *mgmt;
hwsim_check_magic(vif);
@@ -779,6 +818,9 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
return;
info = IEEE80211_SKB_CB(skb);
+ mgmt = (struct ieee80211_mgmt *) skb->data;
+ mgmt->u.beacon.timestamp = __mac80211_hwsim_get_tsf(data);
+
mac80211_hwsim_monitor_rx(hw, skb);
/* wmediumd mode check */
@@ -1199,6 +1241,8 @@ static struct ieee80211_ops mac80211_hwsim_ops =
.sw_scan_start = mac80211_hwsim_sw_scan,
.sw_scan_complete = mac80211_hwsim_sw_scan_complete,
.flush = mac80211_hwsim_flush,
+ .get_tsf = mac80211_hwsim_get_tsf,
+ .set_tsf = mac80211_hwsim_set_tsf,
};
@@ -1564,7 +1608,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
return 0;
err:
- printk(KERN_DEBUG "mac80211_hwsim: error occured in %s\n", __func__);
+ printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
goto out;
out:
dev_kfree_skb(skb);
@@ -1580,11 +1624,11 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
wmediumd_pid = info->snd_pid;
printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, "
- "switching to wmediumd mode with pid %d\n", info->snd_pid);
+ "switching to wmediumd mode with pid %d\n", info->snd_pid);
return 0;
out:
- printk(KERN_DEBUG "mac80211_hwsim: error occured in %s\n", __func__);
+ printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
return -EINVAL;
}
@@ -1647,7 +1691,7 @@ static int hwsim_init_netlink(void)
return 0;
failure:
- printk(KERN_DEBUG "mac80211_hwsim: error occured in %s\n", __func__);
+ printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
return -EINVAL;
}
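The mac80211_hwsim changes above emulate a TSF by keeping only a per-radio offset against the host's real-time clock: get_tsf returns now + offset, set_tsf stores offset = tsf - now, and the same value is stamped into beacons, probe responses, the radiotap TSFT field and rx_status.mactime. A compact userspace sketch of that bookkeeping (struct and names are illustrative):

#include <stdint.h>
#include <time.h>

/* hypothetical per-radio state mirroring the tsf_offset field added above */
struct radio {
        uint64_t tsf_offset;    /* usecs between this radio's TSF and real time */
};

static uint64_t now_usec(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_REALTIME, &ts);
        return (uint64_t)ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
}

static uint64_t radio_get_tsf(const struct radio *r)
{
        return now_usec() + r->tsf_offset;      /* same formula as hwsim's get_tsf */
}

static void radio_set_tsf(struct radio *r, uint64_t tsf)
{
        /* store only the delta; unsigned wrap-around keeps get_tsf consistent */
        r->tsf_offset = tsf - now_usec();
}

Because the subtraction is done in unsigned 64-bit arithmetic, a requested TSF smaller than the current clock simply wraps, and get_tsf still reproduces the requested value.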
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 34bba5234294..a5e182b5e944 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -44,16 +44,16 @@ mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
ht_cap->ht_cap.ampdu_params_info =
(sband->ht_cap.ampdu_factor &
- IEEE80211_HT_AMPDU_PARM_FACTOR)|
+ IEEE80211_HT_AMPDU_PARM_FACTOR) |
((sband->ht_cap.ampdu_density <<
IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT) &
IEEE80211_HT_AMPDU_PARM_DENSITY);
memcpy((u8 *) &ht_cap->ht_cap.mcs, &sband->ht_cap.mcs,
- sizeof(sband->ht_cap.mcs));
+ sizeof(sband->ht_cap.mcs));
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
- (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
/* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */
SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
@@ -69,8 +69,8 @@ mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
* table which matches the requested BA status.
*/
static struct mwifiex_tx_ba_stream_tbl *
-mwifiex_11n_get_tx_ba_stream_status(struct mwifiex_private *priv,
- enum mwifiex_ba_status ba_status)
+mwifiex_get_ba_status(struct mwifiex_private *priv,
+ enum mwifiex_ba_status ba_status)
{
struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
unsigned long flags;
@@ -107,12 +107,11 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
tid = del_ba_param_set >> DELBA_TID_POS;
if (del_ba->del_result == BA_RESULT_SUCCESS) {
- mwifiex_11n_delete_ba_stream_tbl(priv, tid,
- del_ba->peer_mac_addr, TYPE_DELBA_SENT,
- INITIATOR_BIT(del_ba_param_set));
+ mwifiex_del_ba_tbl(priv, tid, del_ba->peer_mac_addr,
+ TYPE_DELBA_SENT,
+ INITIATOR_BIT(del_ba_param_set));
- tx_ba_tbl = mwifiex_11n_get_tx_ba_stream_status(priv,
- BA_STREAM_SETUP_INPROGRESS);
+ tx_ba_tbl = mwifiex_get_ba_status(priv, BA_SETUP_INPROGRESS);
if (tx_ba_tbl)
mwifiex_send_addba(priv, tx_ba_tbl->tid,
tx_ba_tbl->ra);
@@ -120,18 +119,17 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
* In case of failure, recreate the deleted stream in case
* we initiated the ADDBA
*/
- if (INITIATOR_BIT(del_ba_param_set)) {
- mwifiex_11n_create_tx_ba_stream_tbl(priv,
- del_ba->peer_mac_addr, tid,
- BA_STREAM_SETUP_INPROGRESS);
-
- tx_ba_tbl = mwifiex_11n_get_tx_ba_stream_status(priv,
- BA_STREAM_SETUP_INPROGRESS);
- if (tx_ba_tbl)
- mwifiex_11n_delete_ba_stream_tbl(priv,
- tx_ba_tbl->tid, tx_ba_tbl->ra,
- TYPE_DELBA_SENT, true);
- }
+ if (!INITIATOR_BIT(del_ba_param_set))
+ return 0;
+
+ mwifiex_create_ba_tbl(priv, del_ba->peer_mac_addr, tid,
+ BA_SETUP_INPROGRESS);
+
+ tx_ba_tbl = mwifiex_get_ba_status(priv, BA_SETUP_INPROGRESS);
+
+ if (tx_ba_tbl)
+ mwifiex_del_ba_tbl(priv, tx_ba_tbl->tid, tx_ba_tbl->ra,
+ TYPE_DELBA_SENT, true);
}
return 0;
@@ -160,18 +158,17 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
& IEEE80211_ADDBA_PARAM_TID_MASK)
>> BLOCKACKPARAM_TID_POS;
if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) {
- tx_ba_tbl = mwifiex_11n_get_tx_ba_stream_tbl(priv, tid,
+ tx_ba_tbl = mwifiex_get_ba_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
if (tx_ba_tbl) {
dev_dbg(priv->adapter->dev, "info: BA stream complete\n");
- tx_ba_tbl->ba_status = BA_STREAM_SETUP_COMPLETE;
+ tx_ba_tbl->ba_status = BA_SETUP_COMPLETE;
} else {
dev_err(priv->adapter->dev, "BA stream not created\n");
}
} else {
- mwifiex_11n_delete_ba_stream_tbl(priv, tid,
- add_ba_rsp->peer_mac_addr,
- TYPE_DELBA_SENT, true);
+ mwifiex_del_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr,
+ TYPE_DELBA_SENT, true);
if (add_ba_rsp->add_rsp_result != BA_RESULT_TIMEOUT)
priv->aggr_prio_tbl[tid].ampdu_ap =
BA_STREAM_NOT_ALLOWED;
@@ -392,9 +389,9 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
chan_list->chan_scan_param[0].radio_type =
mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
- if ((sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
- && (bss_desc->bcn_ht_info->ht_param &
- IEEE80211_HT_PARAM_CHAN_WIDTH_ANY))
+ if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
+ bss_desc->bcn_ht_info->ht_param &
+ IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)
SET_SECONDARYCHAN(chan_list->chan_scan_param[0].
radio_type,
(bss_desc->bcn_ht_info->ht_param &
@@ -467,7 +464,7 @@ mwifiex_cfg_tx_buf(struct mwifiex_private *priv,
tx_buf = min(priv->adapter->max_tx_buf_size, max_amsdu);
dev_dbg(priv->adapter->dev, "info: max_amsdu=%d, max_tx_buf=%d\n",
- max_amsdu, priv->adapter->max_tx_buf_size);
+ max_amsdu, priv->adapter->max_tx_buf_size);
if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_2K)
curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
@@ -507,7 +504,7 @@ void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv,
struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl)
{
if (!tx_ba_tsr_tbl &&
- mwifiex_is_tx_ba_stream_ptr_valid(priv, tx_ba_tsr_tbl))
+ mwifiex_is_tx_ba_stream_ptr_valid(priv, tx_ba_tsr_tbl))
return;
dev_dbg(priv->adapter->dev, "info: tx_ba_tsr_tbl %p\n", tx_ba_tsr_tbl);
@@ -544,16 +541,15 @@ void mwifiex_11n_delete_all_tx_ba_stream_tbl(struct mwifiex_private *priv)
* table which matches the given RA/TID pair.
*/
struct mwifiex_tx_ba_stream_tbl *
-mwifiex_11n_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
- int tid, u8 *ra)
+mwifiex_get_ba_tbl(struct mwifiex_private *priv, int tid, u8 *ra)
{
struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
unsigned long flags;
spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
- if ((!memcmp(tx_ba_tsr_tbl->ra, ra, ETH_ALEN))
- && (tx_ba_tsr_tbl->tid == tid)) {
+ if (!memcmp(tx_ba_tsr_tbl->ra, ra, ETH_ALEN) &&
+ tx_ba_tsr_tbl->tid == tid) {
spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
flags);
return tx_ba_tsr_tbl;
@@ -567,14 +563,13 @@ mwifiex_11n_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
* This function creates an entry in Tx BA stream table for the
* given RA/TID pair.
*/
-void mwifiex_11n_create_tx_ba_stream_tbl(struct mwifiex_private *priv,
- u8 *ra, int tid,
- enum mwifiex_ba_status ba_status)
+void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
+ enum mwifiex_ba_status ba_status)
{
struct mwifiex_tx_ba_stream_tbl *new_node;
unsigned long flags;
- if (!mwifiex_11n_get_tx_ba_stream_tbl(priv, tid, ra)) {
+ if (!mwifiex_get_ba_tbl(priv, tid, ra)) {
new_node = kzalloc(sizeof(struct mwifiex_tx_ba_stream_tbl),
GFP_ATOMIC);
if (!new_node) {
@@ -668,9 +663,8 @@ void mwifiex_11n_delete_ba_stream(struct mwifiex_private *priv, u8 *del_ba)
tid = del_ba_param_set >> DELBA_TID_POS;
- mwifiex_11n_delete_ba_stream_tbl(priv, tid, cmd_del_ba->peer_mac_addr,
- TYPE_DELBA_RECEIVE,
- INITIATOR_BIT(del_ba_param_set));
+ mwifiex_del_ba_tbl(priv, tid, cmd_del_ba->peer_mac_addr,
+ TYPE_DELBA_RECEIVE, INITIATOR_BIT(del_ba_param_set));
}
/*
@@ -724,7 +718,7 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
rx_reo_tbl->tid = (u16) tx_ba_tsr_tbl->tid;
dev_dbg(priv->adapter->dev, "data: %s tid=%d\n",
- __func__, rx_reo_tbl->tid);
+ __func__, rx_reo_tbl->tid);
memcpy(rx_reo_tbl->ra, tx_ba_tsr_tbl->ra, ETH_ALEN);
rx_reo_tbl++;
count++;
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 90b421e343d4..77646d777dce 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -46,13 +46,12 @@ void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv,
struct mwifiex_tx_ba_stream_tbl
*tx_tbl);
void mwifiex_11n_delete_all_tx_ba_stream_tbl(struct mwifiex_private *priv);
-struct mwifiex_tx_ba_stream_tbl *mwifiex_11n_get_tx_ba_stream_tbl(struct
+struct mwifiex_tx_ba_stream_tbl *mwifiex_get_ba_tbl(struct
mwifiex_private
*priv, int tid,
u8 *ra);
-void mwifiex_11n_create_tx_ba_stream_tbl(struct mwifiex_private *priv, u8 *ra,
- int tid,
- enum mwifiex_ba_status ba_status);
+void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
+ enum mwifiex_ba_status ba_status);
int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac);
int mwifiex_send_delba(struct mwifiex_private *priv, int tid, u8 *peer_mac,
int initiator);
@@ -87,9 +86,8 @@ mwifiex_is_ampdu_allowed(struct mwifiex_private *priv, int tid)
static inline u8
mwifiex_is_amsdu_allowed(struct mwifiex_private *priv, int tid)
{
- return (((priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED)
- && ((priv->is_data_rate_auto)
- || !((priv->bitmap_rates[2]) & 0x03)))
+ return (((priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED) &&
+ (priv->is_data_rate_auto || !(priv->bitmap_rates[2] & 0x03)))
? true : false);
}
@@ -150,11 +148,11 @@ mwifiex_find_stream_to_delete(struct mwifiex_private *priv, int ptr_tid,
*/
static inline int
mwifiex_is_ba_stream_setup(struct mwifiex_private *priv,
- struct mwifiex_ra_list_tbl *ptr, int tid)
+ struct mwifiex_ra_list_tbl *ptr, int tid)
{
struct mwifiex_tx_ba_stream_tbl *tx_tbl;
- tx_tbl = mwifiex_11n_get_tx_ba_stream_tbl(priv, tid, ptr->ra);
+ tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
if (tx_tbl && IS_BASTREAM_SETUP(tx_tbl))
return true;
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 079e5532e686..9eefb2a0ce9f 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -84,7 +84,7 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
/* Add payload */
skb_put(skb_aggr, skb_src->len);
memcpy(skb_aggr->data + sizeof(*tx_header), skb_src->data,
- skb_src->len);
+ skb_src->len);
*pad = (((skb_src->len + LLC_SNAP_LEN) & 3)) ? (4 - (((skb_src->len +
LLC_SNAP_LEN)) & 3)) : 0;
skb_put(skb_aggr, *pad);
@@ -119,14 +119,14 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU);
local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
- sizeof(*local_tx_pd));
+ sizeof(*local_tx_pd));
if (local_tx_pd->tx_control == 0)
/* TxCtrl set by user or default */
local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
- if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
- (priv->adapter->pps_uapsd_mode)) {
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
+ priv->adapter->pps_uapsd_mode) {
if (true == mwifiex_check_last_packet_indication(priv)) {
priv->adapter->tx_lock_flag = true;
local_tx_pd->flags =
@@ -182,7 +182,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);
- tx_info_aggr->bss_index = tx_info_src->bss_index;
+ tx_info_aggr->bss_type = tx_info_src->bss_type;
+ tx_info_aggr->bss_num = tx_info_src->bss_num;
skb_aggr->priority = skb_src->priority;
do {
@@ -256,9 +257,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
mwifiex_write_data_complete(adapter, skb_aggr, -1);
return -1;
}
- if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
- (adapter->pps_uapsd_mode) &&
- (adapter->tx_lock_flag)) {
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
+ adapter->pps_uapsd_mode && adapter->tx_lock_flag) {
priv->adapter->tx_lock_flag = false;
if (ptx_pd)
ptx_pd->flags = 0;
@@ -278,7 +278,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
case -1:
adapter->data_sent = false;
dev_err(adapter->dev, "%s: host_to_card failed: %#x\n",
- __func__, ret);
+ __func__, ret);
adapter->dbg.num_tx_host_to_card_failure++;
mwifiex_write_data_complete(adapter, skb_aggr, ret);
return 0;
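In mwifiex_11n_form_amsdu_pkt() above, each aggregated subframe is padded so that the subframe plus its LLC/SNAP header ends on a 4-byte boundary. The ternary in that hunk computes the same quantity as this small helper (a restatement for readability, not driver code; llc_snap_len stands for the LLC/SNAP header length the driver prepends):

/* pad needed to bring (len + llc_snap_len) up to a 4-byte boundary */
static unsigned int amsdu_pad(unsigned int len, unsigned int llc_snap_len)
{
        unsigned int rem = (len + llc_snap_len) & 3;

        return rem ? 4 - rem : 0;   /* i.e. (-(len + llc_snap_len)) & 3 */
}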
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 681d3f2a4c28..9c44088054dd 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -27,31 +27,31 @@
#include "11n_rxreorder.h"
/*
- * This function dispatches all packets in the Rx reorder table.
+ * This function dispatches all packets in the Rx reorder table until the
+ * start window.
*
* There could be holes in the buffer, which are skipped by the function.
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
*/
static void
-mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
- struct mwifiex_rx_reorder_tbl
- *rx_reor_tbl_ptr, int start_win)
+mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
+ struct mwifiex_rx_reorder_tbl *tbl, int start_win)
{
- int no_pkt_to_send, i;
+ int pkt_to_send, i;
void *rx_tmp_ptr;
unsigned long flags;
- no_pkt_to_send = (start_win > rx_reor_tbl_ptr->start_win) ?
- min((start_win - rx_reor_tbl_ptr->start_win),
- rx_reor_tbl_ptr->win_size) : rx_reor_tbl_ptr->win_size;
+ pkt_to_send = (start_win > tbl->start_win) ?
+ min((start_win - tbl->start_win), tbl->win_size) :
+ tbl->win_size;
- for (i = 0; i < no_pkt_to_send; ++i) {
+ for (i = 0; i < pkt_to_send; ++i) {
spin_lock_irqsave(&priv->rx_pkt_lock, flags);
rx_tmp_ptr = NULL;
- if (rx_reor_tbl_ptr->rx_reorder_ptr[i]) {
- rx_tmp_ptr = rx_reor_tbl_ptr->rx_reorder_ptr[i];
- rx_reor_tbl_ptr->rx_reorder_ptr[i] = NULL;
+ if (tbl->rx_reorder_ptr[i]) {
+ rx_tmp_ptr = tbl->rx_reorder_ptr[i];
+ tbl->rx_reorder_ptr[i] = NULL;
}
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
if (rx_tmp_ptr)
@@ -63,13 +63,12 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
* We don't have a circular buffer, hence use rotation to simulate
* circular buffer
*/
- for (i = 0; i < rx_reor_tbl_ptr->win_size - no_pkt_to_send; ++i) {
- rx_reor_tbl_ptr->rx_reorder_ptr[i] =
- rx_reor_tbl_ptr->rx_reorder_ptr[no_pkt_to_send + i];
- rx_reor_tbl_ptr->rx_reorder_ptr[no_pkt_to_send + i] = NULL;
+ for (i = 0; i < tbl->win_size - pkt_to_send; ++i) {
+ tbl->rx_reorder_ptr[i] = tbl->rx_reorder_ptr[pkt_to_send + i];
+ tbl->rx_reorder_ptr[pkt_to_send + i] = NULL;
}
- rx_reor_tbl_ptr->start_win = start_win;
+ tbl->start_win = start_win;
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}
@@ -83,20 +82,20 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
*/
static void
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
- struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr)
+ struct mwifiex_rx_reorder_tbl *tbl)
{
int i, j, xchg;
void *rx_tmp_ptr;
unsigned long flags;
- for (i = 0; i < rx_reor_tbl_ptr->win_size; ++i) {
+ for (i = 0; i < tbl->win_size; ++i) {
spin_lock_irqsave(&priv->rx_pkt_lock, flags);
- if (!rx_reor_tbl_ptr->rx_reorder_ptr[i]) {
+ if (!tbl->rx_reorder_ptr[i]) {
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
break;
}
- rx_tmp_ptr = rx_reor_tbl_ptr->rx_reorder_ptr[i];
- rx_reor_tbl_ptr->rx_reorder_ptr[i] = NULL;
+ rx_tmp_ptr = tbl->rx_reorder_ptr[i];
+ tbl->rx_reorder_ptr[i] = NULL;
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
}
@@ -107,15 +106,13 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
* circular buffer
*/
if (i > 0) {
- xchg = rx_reor_tbl_ptr->win_size - i;
+ xchg = tbl->win_size - i;
for (j = 0; j < xchg; ++j) {
- rx_reor_tbl_ptr->rx_reorder_ptr[j] =
- rx_reor_tbl_ptr->rx_reorder_ptr[i + j];
- rx_reor_tbl_ptr->rx_reorder_ptr[i + j] = NULL;
+ tbl->rx_reorder_ptr[j] = tbl->rx_reorder_ptr[i + j];
+ tbl->rx_reorder_ptr[i + j] = NULL;
}
}
- rx_reor_tbl_ptr->start_win = (rx_reor_tbl_ptr->start_win + i)
- &(MAX_TID_VALUE - 1);
+ tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}
@@ -126,28 +123,25 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
* pending packets in the Rx reorder table before deletion.
*/
static void
-mwifiex_11n_delete_rx_reorder_tbl_entry(struct mwifiex_private *priv,
- struct mwifiex_rx_reorder_tbl
- *rx_reor_tbl_ptr)
+mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
+ struct mwifiex_rx_reorder_tbl *tbl)
{
unsigned long flags;
- if (!rx_reor_tbl_ptr)
+ if (!tbl)
return;
- mwifiex_11n_dispatch_pkt_until_start_win(priv, rx_reor_tbl_ptr,
- (rx_reor_tbl_ptr->start_win +
- rx_reor_tbl_ptr->win_size)
- &(MAX_TID_VALUE - 1));
+ mwifiex_11n_dispatch_pkt(priv, tbl, (tbl->start_win + tbl->win_size) &
+ (MAX_TID_VALUE - 1));
- del_timer(&rx_reor_tbl_ptr->timer_context.timer);
+ del_timer(&tbl->timer_context.timer);
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- list_del(&rx_reor_tbl_ptr->list);
+ list_del(&tbl->list);
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
- kfree(rx_reor_tbl_ptr->rx_reorder_ptr);
- kfree(rx_reor_tbl_ptr);
+ kfree(tbl->rx_reorder_ptr);
+ kfree(tbl);
}
/*
@@ -157,16 +151,15 @@ mwifiex_11n_delete_rx_reorder_tbl_entry(struct mwifiex_private *priv,
static struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
- struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
+ struct mwifiex_rx_reorder_tbl *tbl;
unsigned long flags;
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- list_for_each_entry(rx_reor_tbl_ptr, &priv->rx_reorder_tbl_ptr, list) {
- if ((!memcmp(rx_reor_tbl_ptr->ta, ta, ETH_ALEN))
- && (rx_reor_tbl_ptr->tid == tid)) {
+ list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
+ if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
flags);
- return rx_reor_tbl_ptr;
+ return tbl;
}
}
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
@@ -200,19 +193,19 @@ mwifiex_11n_find_last_seq_num(struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr)
static void
mwifiex_flush_data(unsigned long context)
{
- struct reorder_tmr_cnxt *reorder_cnxt =
+ struct reorder_tmr_cnxt *ctx =
(struct reorder_tmr_cnxt *) context;
int start_win;
- start_win = mwifiex_11n_find_last_seq_num(reorder_cnxt->ptr);
- if (start_win >= 0) {
- dev_dbg(reorder_cnxt->priv->adapter->dev,
- "info: flush data %d\n", start_win);
- mwifiex_11n_dispatch_pkt_until_start_win(reorder_cnxt->priv,
- reorder_cnxt->ptr,
- ((reorder_cnxt->ptr->start_win +
- start_win + 1) & (MAX_TID_VALUE - 1)));
- }
+ start_win = mwifiex_11n_find_last_seq_num(ctx->ptr);
+
+ if (start_win < 0)
+ return;
+
+ dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", start_win);
+ mwifiex_11n_dispatch_pkt(ctx->priv, ctx->ptr,
+ (ctx->ptr->start_win + start_win + 1) &
+ (MAX_TID_VALUE - 1));
}
/*
@@ -227,10 +220,10 @@ mwifiex_flush_data(unsigned long context)
*/
static void
mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
- int tid, int win_size, int seq_num)
+ int tid, int win_size, int seq_num)
{
int i;
- struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr, *new_node;
+ struct mwifiex_rx_reorder_tbl *tbl, *new_node;
u16 last_seq = 0;
unsigned long flags;
@@ -238,17 +231,16 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
* If we get a TID, ta pair which is already present dispatch all the
* the packets and move the window size until the ssn
*/
- rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
- if (rx_reor_tbl_ptr) {
- mwifiex_11n_dispatch_pkt_until_start_win(priv, rx_reor_tbl_ptr,
- seq_num);
+ tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
+ if (tbl) {
+ mwifiex_11n_dispatch_pkt(priv, tbl, seq_num);
return;
}
- /* if !rx_reor_tbl_ptr then create one */
+ /* if !tbl then create one */
new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
if (!new_node) {
dev_err(priv->adapter->dev, "%s: failed to alloc new_node\n",
- __func__);
+ __func__);
return;
}
@@ -360,7 +352,8 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
cmd_addba_req->block_ack_param_set = cpu_to_le16(block_ack_param_set);
mwifiex_11n_create_rx_reorder_tbl(priv, cmd_addba_req->peer_mac_addr,
- tid, win_size, le16_to_cpu(cmd_addba_req->ssn));
+ tid, win_size,
+ le16_to_cpu(cmd_addba_req->ssn));
return 0;
}
@@ -401,35 +394,34 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
u16 seq_num, u16 tid,
u8 *ta, u8 pkt_type, void *payload)
{
- struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
+ struct mwifiex_rx_reorder_tbl *tbl;
int start_win, end_win, win_size;
u16 pkt_index;
- rx_reor_tbl_ptr =
- mwifiex_11n_get_rx_reorder_tbl((struct mwifiex_private *) priv,
- tid, ta);
- if (!rx_reor_tbl_ptr) {
+ tbl = mwifiex_11n_get_rx_reorder_tbl((struct mwifiex_private *) priv,
+ tid, ta);
+ if (!tbl) {
if (pkt_type != PKT_TYPE_BAR)
mwifiex_process_rx_packet(priv->adapter, payload);
return 0;
}
- start_win = rx_reor_tbl_ptr->start_win;
- win_size = rx_reor_tbl_ptr->win_size;
+ start_win = tbl->start_win;
+ win_size = tbl->win_size;
end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
- del_timer(&rx_reor_tbl_ptr->timer_context.timer);
- mod_timer(&rx_reor_tbl_ptr->timer_context.timer, jiffies
- + (MIN_FLUSH_TIMER_MS * win_size * HZ) / 1000);
+ del_timer(&tbl->timer_context.timer);
+ mod_timer(&tbl->timer_context.timer,
+ jiffies + (MIN_FLUSH_TIMER_MS * win_size * HZ) / 1000);
/*
* If seq_num is less then starting win then ignore and drop the
* packet
*/
if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {/* Wrap */
- if (seq_num >= ((start_win + (TWOPOW11)) & (MAX_TID_VALUE - 1))
- && (seq_num < start_win))
+ if (seq_num >= ((start_win + TWOPOW11) &
+ (MAX_TID_VALUE - 1)) && (seq_num < start_win))
return -1;
- } else if ((seq_num < start_win)
- || (seq_num > (start_win + (TWOPOW11)))) {
+ } else if ((seq_num < start_win) ||
+ (seq_num > (start_win + TWOPOW11))) {
return -1;
}
@@ -440,17 +432,17 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
if (pkt_type == PKT_TYPE_BAR)
seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1);
- if (((end_win < start_win)
- && (seq_num < (TWOPOW11 - (MAX_TID_VALUE - start_win)))
- && (seq_num > end_win)) || ((end_win > start_win)
- && ((seq_num > end_win) || (seq_num < start_win)))) {
+ if (((end_win < start_win) &&
+ (seq_num < (TWOPOW11 - (MAX_TID_VALUE - start_win))) &&
+ (seq_num > end_win)) ||
+ ((end_win > start_win) && ((seq_num > end_win) ||
+ (seq_num < start_win)))) {
end_win = seq_num;
if (((seq_num - win_size) + 1) >= 0)
start_win = (end_win - win_size) + 1;
else
start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
- mwifiex_11n_dispatch_pkt_until_start_win(priv,
- rx_reor_tbl_ptr, start_win);
+ mwifiex_11n_dispatch_pkt(priv, tbl, start_win);
}
if (pkt_type != PKT_TYPE_BAR) {
@@ -459,17 +451,17 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
else
pkt_index = (seq_num+MAX_TID_VALUE) - start_win;
- if (rx_reor_tbl_ptr->rx_reorder_ptr[pkt_index])
+ if (tbl->rx_reorder_ptr[pkt_index])
return -1;
- rx_reor_tbl_ptr->rx_reorder_ptr[pkt_index] = payload;
+ tbl->rx_reorder_ptr[pkt_index] = payload;
}
/*
* Dispatch all packets sequentially from start_win until a
* hole is found and adjust the start_win appropriately
*/
- mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr);
+ mwifiex_11n_scan_and_dispatch(priv, tbl);
return 0;
}
@@ -480,10 +472,10 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
* The TID/TA are taken from del BA event body.
*/
void
-mwifiex_11n_delete_ba_stream_tbl(struct mwifiex_private *priv, int tid,
- u8 *peer_mac, u8 type, int initiator)
+mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
+ u8 type, int initiator)
{
- struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
+ struct mwifiex_rx_reorder_tbl *tbl;
struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
u8 cleanup_rx_reorder_tbl;
unsigned long flags;
@@ -493,23 +485,23 @@ mwifiex_11n_delete_ba_stream_tbl(struct mwifiex_private *priv, int tid,
else
cleanup_rx_reorder_tbl = (initiator) ? false : true;
- dev_dbg(priv->adapter->dev, "event: DELBA: %pM tid=%d, "
- "initiator=%d\n", peer_mac, tid, initiator);
+ dev_dbg(priv->adapter->dev, "event: DELBA: %pM tid=%d initiator=%d\n",
+ peer_mac, tid, initiator);
if (cleanup_rx_reorder_tbl) {
- rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
+ tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
peer_mac);
- if (!rx_reor_tbl_ptr) {
+ if (!tbl) {
dev_dbg(priv->adapter->dev,
- "event: TID, TA not found in table\n");
+ "event: TID, TA not found in table\n");
return;
}
- mwifiex_11n_delete_rx_reorder_tbl_entry(priv, rx_reor_tbl_ptr);
+ mwifiex_del_rx_reorder_entry(priv, tbl);
} else {
- ptx_tbl = mwifiex_11n_get_tx_ba_stream_tbl(priv, tid, peer_mac);
+ ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
if (!ptx_tbl) {
dev_dbg(priv->adapter->dev,
- "event: TID, RA not found in table\n");
+ "event: TID, RA not found in table\n");
return;
}
@@ -532,7 +524,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
(struct host_cmd_ds_11n_addba_rsp *)
&resp->params.add_ba_rsp;
int tid, win_size;
- struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
+ struct mwifiex_rx_reorder_tbl *tbl;
uint16_t block_ack_param_set;
block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
@@ -548,19 +540,18 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
>> BLOCKACKPARAM_WINSIZE_POS;
- dev_dbg(priv->adapter->dev, "cmd: ADDBA RSP: %pM"
- " tid=%d ssn=%d win_size=%d\n",
- add_ba_rsp->peer_mac_addr,
- tid, add_ba_rsp->ssn, win_size);
+ dev_dbg(priv->adapter->dev,
+ "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
+ add_ba_rsp->peer_mac_addr, tid,
+ add_ba_rsp->ssn, win_size);
} else {
dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n",
- add_ba_rsp->peer_mac_addr, tid);
+ add_ba_rsp->peer_mac_addr, tid);
- rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv,
- tid, add_ba_rsp->peer_mac_addr);
- if (rx_reor_tbl_ptr)
- mwifiex_11n_delete_rx_reorder_tbl_entry(priv,
- rx_reor_tbl_ptr);
+ tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
+ add_ba_rsp->peer_mac_addr);
+ if (tbl)
+ mwifiex_del_rx_reorder_entry(priv, tbl);
}
return 0;
@@ -599,7 +590,7 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
list_for_each_entry_safe(del_tbl_ptr, tmp_node,
&priv->rx_reorder_tbl_ptr, list) {
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
- mwifiex_11n_delete_rx_reorder_tbl_entry(priv, del_tbl_ptr);
+ mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
}
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
index 033c8adbdcd4..f1bffebabc60 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -41,9 +41,8 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
u16 seqNum,
u16 tid, u8 *ta,
u8 pkttype, void *payload);
-void mwifiex_11n_delete_ba_stream_tbl(struct mwifiex_private *priv, int Tid,
- u8 *PeerMACAddr, u8 type,
- int initiator);
+void mwifiex_del_ba_tbl(struct mwifiex_private *priv, int Tid,
+ u8 *PeerMACAddr, u8 type, int initiator);
void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
struct host_cmd_ds_11n_batimeout *event);
int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 2210a0f9af2d..65050384c42b 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -79,7 +79,7 @@ static int
mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
u8 key_index, bool pairwise, const u8 *mac_addr)
{
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
if (mwifiex_set_encode(priv, NULL, 0, key_index, 1)) {
wiphy_err(wiphy, "deleting the crypto keys\n");
@@ -122,13 +122,12 @@ mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy,
struct net_device *dev,
bool enabled, int timeout)
{
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
u32 ps_mode;
if (timeout)
wiphy_dbg(wiphy,
- "info: ignoring the timeout value"
- " for IEEE power save\n");
+ "info: ignore timeout value for IEEE Power Save\n");
ps_mode = enabled;
@@ -143,10 +142,10 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
u8 key_index, bool unicast,
bool multicast)
{
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
/* Return if WEP key not configured */
- if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED)
+ if (!priv->sec_info.wep_enabled)
return 0;
if (mwifiex_set_encode(priv, NULL, 0, key_index, 0)) {
@@ -165,10 +164,10 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
u8 key_index, bool pairwise, const u8 *mac_addr,
struct key_params *params)
{
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
if (mwifiex_set_encode(priv, params->key, params->key_len,
- key_index, 0)) {
+ key_index, 0)) {
wiphy_err(wiphy, "crypto keys added\n");
return -EFAULT;
}
@@ -225,7 +224,7 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
}
if (ch->hw_value == next_chan + 1 &&
- ch->max_power == max_pwr) {
+ ch->max_power == max_pwr) {
next_chan++;
no_of_parsed_chan++;
} else {
@@ -252,7 +251,7 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
domain_info->no_of_triplet = no_of_triplet;
if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
- HostCmd_ACT_GEN_SET, 0, NULL)) {
+ HostCmd_ACT_GEN_SET, 0, NULL)) {
wiphy_err(wiphy, "11D: setting domain info in FW\n");
return -1;
}
@@ -271,7 +270,7 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
* - Set bt Country IE
*/
static int mwifiex_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
+ struct regulatory_request *request)
{
struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
@@ -316,7 +315,7 @@ mwifiex_set_rf_channel(struct mwifiex_private *priv,
if (chan->band == IEEE80211_BAND_2GHZ) {
if (channel_type == NL80211_CHAN_NO_HT)
if (priv->adapter->config_bands == BAND_B ||
- priv->adapter->config_bands == BAND_G)
+ priv->adapter->config_bands == BAND_G)
config_bands =
priv->adapter->config_bands;
else
@@ -336,7 +335,7 @@ mwifiex_set_rf_channel(struct mwifiex_private *priv,
if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
adapter->adhoc_start_band = config_bands;
if ((config_bands & BAND_GN) ||
- (config_bands & BAND_AN))
+ (config_bands & BAND_AN))
adapter->adhoc_11n_enabled = true;
else
adapter->adhoc_11n_enabled = false;
@@ -350,9 +349,8 @@ mwifiex_set_rf_channel(struct mwifiex_private *priv,
mwifiex_send_domain_info_cmd_fw(wiphy);
}
- wiphy_dbg(wiphy, "info: setting band %d, channel offset %d and "
- "mode %d\n", config_bands, adapter->sec_chan_offset,
- priv->bss_mode);
+ wiphy_dbg(wiphy, "info: setting band %d, chan offset %d, mode %d\n",
+ config_bands, adapter->sec_chan_offset, priv->bss_mode);
if (!chan)
return 0;
@@ -376,7 +374,12 @@ mwifiex_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
+ struct mwifiex_private *priv;
+
+ if (dev)
+ priv = mwifiex_netdev_get_priv(dev);
+ else
+ priv = mwifiex_cfg80211_get_priv(wiphy);
if (priv->media_connected) {
wiphy_err(wiphy, "This setting is valid only when station "
@@ -398,8 +401,8 @@ mwifiex_set_frag(struct mwifiex_private *priv, u32 frag_thr)
{
int ret;
- if (frag_thr < MWIFIEX_FRAG_MIN_VALUE
- || frag_thr > MWIFIEX_FRAG_MAX_VALUE)
+ if (frag_thr < MWIFIEX_FRAG_MIN_VALUE ||
+ frag_thr > MWIFIEX_FRAG_MAX_VALUE)
return -EINVAL;
/* Send request to firmware */
@@ -534,6 +537,11 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
ret = -EFAULT;
}
+ /* Get DTIM period information from firmware */
+ mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, DTIM_PERIOD_I,
+ &priv->dtim_period);
+
/*
* Bit 0 in tx_htinfo indicates that current Tx rate is 11n rate. Valid
* MCS index values for us are 0 to 7.
@@ -557,6 +565,22 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
/* bit rate is in 500 kb/s units. Convert it to 100kb/s units */
sinfo->txrate.legacy = rate.rate * 5;
+ if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+ sinfo->filled |= STATION_INFO_BSS_PARAM;
+ sinfo->bss_param.flags = 0;
+ if (priv->curr_bss_params.bss_descriptor.cap_info_bitmap &
+ WLAN_CAPABILITY_SHORT_PREAMBLE)
+ sinfo->bss_param.flags |=
+ BSS_PARAM_FLAGS_SHORT_PREAMBLE;
+ if (priv->curr_bss_params.bss_descriptor.cap_info_bitmap &
+ WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ sinfo->bss_param.flags |=
+ BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
+ sinfo->bss_param.dtim_period = priv->dtim_period;
+ sinfo->bss_param.beacon_interval =
+ priv->curr_bss_params.bss_descriptor.beacon_period;
+ }
+
return ret;
}
@@ -587,7 +611,6 @@ static struct ieee80211_rate mwifiex_rates[] = {
{.bitrate = 20, .hw_value = 4, },
{.bitrate = 55, .hw_value = 11, },
{.bitrate = 110, .hw_value = 22, },
- {.bitrate = 220, .hw_value = 44, },
{.bitrate = 60, .hw_value = 12, },
{.bitrate = 90, .hw_value = 18, },
{.bitrate = 120, .hw_value = 24, },
@@ -596,7 +619,6 @@ static struct ieee80211_rate mwifiex_rates[] = {
{.bitrate = 360, .hw_value = 72, },
{.bitrate = 480, .hw_value = 96, },
{.bitrate = 540, .hw_value = 108, },
- {.bitrate = 720, .hw_value = 144, },
};
/* Channel definitions to be advertised to cfg80211 */
@@ -622,7 +644,7 @@ static struct ieee80211_supported_band mwifiex_band_2ghz = {
.channels = mwifiex_channels_2ghz,
.n_channels = ARRAY_SIZE(mwifiex_channels_2ghz),
.bitrates = mwifiex_rates,
- .n_bitrates = 14,
+ .n_bitrates = ARRAY_SIZE(mwifiex_rates),
};
static struct ieee80211_channel mwifiex_channels_5ghz[] = {
@@ -662,8 +684,8 @@ static struct ieee80211_channel mwifiex_channels_5ghz[] = {
static struct ieee80211_supported_band mwifiex_band_5ghz = {
.channels = mwifiex_channels_5ghz,
.n_channels = ARRAY_SIZE(mwifiex_channels_5ghz),
- .bitrates = mwifiex_rates - 4,
- .n_bitrates = ARRAY_SIZE(mwifiex_rates) + 4,
+ .bitrates = mwifiex_rates + 4,
+ .n_bitrates = ARRAY_SIZE(mwifiex_rates) - 4,
};
@@ -722,8 +744,7 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
adapter->channel_type = NL80211_CHAN_NO_HT;
wiphy_debug(wiphy, "info: device configured in 802.11%s%s mode\n",
- (mode & BAND_B) ? "b" : "",
- (mode & BAND_G) ? "g" : "");
+ (mode & BAND_B) ? "b" : "", (mode & BAND_G) ? "g" : "");
return 0;
}
@@ -778,8 +799,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
ie_buf[1] = bss_info.ssid.ssid_len;
memcpy(&ie_buf[sizeof(struct ieee_types_header)],
- &bss_info.ssid.ssid,
- bss_info.ssid.ssid_len);
+ &bss_info.ssid.ssid, bss_info.ssid.ssid_len);
ie_len = ie_buf[1] + sizeof(struct ieee_types_header);
band = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
@@ -788,8 +808,8 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
band));
bss = cfg80211_inform_bss(priv->wdev->wiphy, chan,
- bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
- 0, ie_buf, ie_len, 0, GFP_KERNEL);
+ bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
+ 0, ie_buf, ie_len, 0, GFP_KERNEL);
cfg80211_put_bss(bss);
memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN);
@@ -815,12 +835,12 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
u8 *bssid, int mode, struct ieee80211_channel *channel,
struct cfg80211_connect_params *sme, bool privacy)
{
- struct mwifiex_802_11_ssid req_ssid;
+ struct cfg80211_ssid req_ssid;
int ret, auth_type = 0;
struct cfg80211_bss *bss = NULL;
u8 is_scanning_required = 0;
- memset(&req_ssid, 0, sizeof(struct mwifiex_802_11_ssid));
+ memset(&req_ssid, 0, sizeof(struct cfg80211_ssid));
req_ssid.ssid_len = ssid_len;
if (ssid_len > IEEE80211_MAX_SSID_LEN) {
@@ -847,6 +867,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
priv->sec_info.wpa2_enabled = false;
priv->wep_key_curr_index = 0;
priv->sec_info.encryption_mode = 0;
+ priv->sec_info.is_authtype_auto = 0;
ret = mwifiex_set_encode(priv, NULL, 0, 0, 1);
if (mode == NL80211_IFTYPE_ADHOC) {
@@ -868,11 +889,12 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
}
/* Now handle infra mode. "sme" is valid for infra mode only */
- if (sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC
- || sme->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM)
+ if (sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC) {
auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM;
- else if (sme->auth_type == NL80211_AUTHTYPE_SHARED_KEY)
- auth_type = NL80211_AUTHTYPE_SHARED_KEY;
+ priv->sec_info.is_authtype_auto = 1;
+ } else {
+ auth_type = sme->auth_type;
+ }
if (sme->crypto.n_ciphers_pairwise) {
priv->sec_info.encryption_mode =
@@ -898,12 +920,6 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
}
}
done:
- /* Do specific SSID scanning */
- if (mwifiex_request_scan(priv, &req_ssid)) {
- dev_err(priv->adapter->dev, "scan error\n");
- return -EFAULT;
- }
-
/*
* Scan entries are valid for some time (15 sec). So we can save one
* active scan time if we just try cfg80211_get_bss first. If it fails
@@ -932,14 +948,15 @@ done:
if (!bss) {
if (is_scanning_required) {
- dev_warn(priv->adapter->dev, "assoc: requested "
- "bss not found in scan results\n");
+ dev_warn(priv->adapter->dev,
+ "assoc: requested bss not found in scan results\n");
break;
}
is_scanning_required = 1;
} else {
- dev_dbg(priv->adapter->dev, "info: trying to associate to %s and bssid %pM\n",
- (char *) req_ssid.ssid, bss->bssid);
+ dev_dbg(priv->adapter->dev,
+ "info: trying to associate to '%s' bssid %pM\n",
+ (char *) req_ssid.ssid, bss->bssid);
memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
break;
}
@@ -979,7 +996,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
}
wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
- (char *) sme->ssid, sme->bssid);
+ (char *) sme->ssid, sme->bssid);
ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
priv->bss_mode, sme->channel, sme, 0);
@@ -1011,7 +1028,7 @@ static int
mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ibss_params *params)
{
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
int ret = 0;
if (priv->bss_mode != NL80211_IFTYPE_ADHOC) {
@@ -1021,11 +1038,11 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
}
wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
- (char *) params->ssid, params->bssid);
+ (char *) params->ssid, params->bssid);
ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
- params->bssid, priv->bss_mode,
- params->channel, NULL, params->privacy);
+ params->bssid, priv->bss_mode,
+ params->channel, NULL, params->privacy);
done:
if (!ret) {
cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
@@ -1049,10 +1066,10 @@ done:
static int
mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
{
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n",
- priv->cfg_bssid);
+ priv->cfg_bssid);
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
@@ -1081,17 +1098,15 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
priv->scan_request = request;
priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!priv->user_scan_cfg) {
dev_err(priv->adapter->dev, "failed to alloc scan_req\n");
return -ENOMEM;
}
- for (i = 0; i < request->n_ssids; i++) {
- memcpy(priv->user_scan_cfg->ssid_list[i].ssid,
- request->ssids[i].ssid, request->ssids[i].ssid_len);
- priv->user_scan_cfg->ssid_list[i].max_len =
- request->ssids[i].ssid_len;
- }
+
+ priv->user_scan_cfg->num_ssids = request->n_ssids;
+ priv->user_scan_cfg->ssid_list = request->ssids;
+
for (i = 0; i < request->n_channels; i++) {
chan = request->channels[i];
priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
@@ -1099,10 +1114,10 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
priv->user_scan_cfg->chan_list[i].scan_type =
- MWIFIEX_SCAN_TYPE_PASSIVE;
+ MWIFIEX_SCAN_TYPE_PASSIVE;
else
priv->user_scan_cfg->chan_list[i].scan_type =
- MWIFIEX_SCAN_TYPE_ACTIVE;
+ MWIFIEX_SCAN_TYPE_ACTIVE;
priv->user_scan_cfg->chan_list[i].scan_time = 0;
}
@@ -1173,9 +1188,9 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
memset(mcs, 0xff, rx_mcs_supp);
/* Clear all the other values */
memset(&mcs[rx_mcs_supp], 0,
- sizeof(struct ieee80211_mcs_info) - rx_mcs_supp);
+ sizeof(struct ieee80211_mcs_info) - rx_mcs_supp);
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
- ISSUPP_CHANWIDTH40(adapter->hw_dot_11n_dev_cap))
+ ISSUPP_CHANWIDTH40(adapter->hw_dot_11n_dev_cap))
/* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */
SETHT_MCS32(mcs_set.rx_mask);
@@ -1188,10 +1203,10 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
* create a new virtual interface with the given name
*/
struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
- char *name,
- enum nl80211_iftype type,
- u32 *flags,
- struct vif_params *params)
+ char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params)
{
struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
struct mwifiex_adapter *adapter;
@@ -1224,7 +1239,6 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
priv->bss_priority = 0;
priv->bss_role = MWIFIEX_BSS_ROLE_STA;
- priv->bss_index = 0;
priv->bss_num = 0;
break;
@@ -1288,10 +1302,7 @@ EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
*/
int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
{
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
-
- if (!priv || !dev)
- return 0;
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
#ifdef CONFIG_DEBUG_FS
mwifiex_dev_debugfs_remove(priv);
@@ -1353,11 +1364,12 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
int ret;
void *wdev_priv;
struct wireless_dev *wdev;
+ struct ieee80211_sta_ht_cap *ht_info;
wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
if (!wdev) {
dev_err(priv->adapter->dev, "%s: allocating wireless device\n",
- __func__);
+ __func__);
return -ENOMEM;
}
wdev->wiphy =
@@ -1369,17 +1381,17 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
}
wdev->iftype = NL80211_IFTYPE_STATION;
wdev->wiphy->max_scan_ssids = 10;
- wdev->wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
+ wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
- mwifiex_setup_ht_caps(
- &wdev->wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap, priv);
+ ht_info = &wdev->wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap;
+ mwifiex_setup_ht_caps(ht_info, priv);
if (priv->adapter->config_bands & BAND_A) {
wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &mwifiex_band_5ghz;
- mwifiex_setup_ht_caps(
- &wdev->wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap, priv);
+ ht_info = &wdev->wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap;
+ mwifiex_setup_ht_caps(ht_info, priv);
} else {
wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
}
@@ -1406,13 +1418,13 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
ret = wiphy_register(wdev->wiphy);
if (ret < 0) {
dev_err(priv->adapter->dev, "%s: registering cfg80211 device\n",
- __func__);
+ __func__);
wiphy_free(wdev->wiphy);
kfree(wdev);
return ret;
} else {
dev_dbg(priv->adapter->dev,
- "info: successfully registered wiphy device\n");
+ "info: successfully registered wiphy device\n");
}
priv->wdev = wdev;
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 1782a77f15dc..2fe1c33765b8 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -163,65 +163,24 @@ u32 mwifiex_get_active_data_rates(struct mwifiex_private *priv, u8 *rates)
return mwifiex_get_supported_rates(priv, rates);
else
return mwifiex_copy_rates(rates, 0,
- priv->curr_bss_params.data_rates,
- priv->curr_bss_params.num_of_rates);
+ priv->curr_bss_params.data_rates,
+ priv->curr_bss_params.num_of_rates);
}
/*
* This function locates the Channel-Frequency-Power triplet based upon
- * band and channel parameters.
+ * band and channel/frequency parameters.
*/
struct mwifiex_chan_freq_power *
-mwifiex_get_cfp_by_band_and_channel_from_cfg80211(struct mwifiex_private
- *priv, u8 band, u16 channel)
+mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq)
{
struct mwifiex_chan_freq_power *cfp = NULL;
struct ieee80211_supported_band *sband;
- struct ieee80211_channel *ch;
+ struct ieee80211_channel *ch = NULL;
int i;
- if (mwifiex_band_to_radio_type(band) == HostCmd_SCAN_RADIO_TYPE_BG)
- sband = priv->wdev->wiphy->bands[IEEE80211_BAND_2GHZ];
- else
- sband = priv->wdev->wiphy->bands[IEEE80211_BAND_5GHZ];
-
- if (!sband) {
- dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d"
- " & channel %d\n", __func__, band, channel);
+ if (!channel && !freq)
return cfp;
- }
-
- for (i = 0; i < sband->n_channels; i++) {
- ch = &sband->channels[i];
- if (((ch->hw_value == channel) ||
- (channel == FIRST_VALID_CHANNEL))
- && !(ch->flags & IEEE80211_CHAN_DISABLED)) {
- priv->cfp.channel = channel;
- priv->cfp.freq = ch->center_freq;
- priv->cfp.max_tx_power = ch->max_power;
- cfp = &priv->cfp;
- break;
- }
- }
- if (i == sband->n_channels)
- dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d"
- " & channel %d\n", __func__, band, channel);
-
- return cfp;
-}
-
-/*
- * This function locates the Channel-Frequency-Power triplet based upon
- * band and frequency parameters.
- */
-struct mwifiex_chan_freq_power *
-mwifiex_get_cfp_by_band_and_freq_from_cfg80211(struct mwifiex_private *priv,
- u8 band, u32 freq)
-{
- struct mwifiex_chan_freq_power *cfp = NULL;
- struct ieee80211_supported_band *sband;
- struct ieee80211_channel *ch;
- int i;
if (mwifiex_band_to_radio_type(band) == HostCmd_SCAN_RADIO_TYPE_BG)
sband = priv->wdev->wiphy->bands[IEEE80211_BAND_2GHZ];
@@ -229,25 +188,40 @@ mwifiex_get_cfp_by_band_and_freq_from_cfg80211(struct mwifiex_private *priv,
sband = priv->wdev->wiphy->bands[IEEE80211_BAND_5GHZ];
if (!sband) {
- dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d"
- " & freq %d\n", __func__, band, freq);
+ dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d\n",
+ __func__, band);
return cfp;
}
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
- if ((ch->center_freq == freq) &&
- !(ch->flags & IEEE80211_CHAN_DISABLED)) {
- priv->cfp.channel = ch->hw_value;
- priv->cfp.freq = freq;
- priv->cfp.max_tx_power = ch->max_power;
- cfp = &priv->cfp;
- break;
+
+ if (ch->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ if (freq) {
+ if (ch->center_freq == freq)
+ break;
+ } else {
+ /* find by valid channel */
+ if (ch->hw_value == channel ||
+ channel == FIRST_VALID_CHANNEL)
+ break;
}
}
- if (i == sband->n_channels)
+ if (i == sband->n_channels) {
dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d"
- " & freq %d\n", __func__, band, freq);
+ " & channel=%d freq=%d\n", __func__, band, channel,
+ freq);
+ } else {
+ if (!ch)
+ return cfp;
+
+ priv->cfp.channel = ch->hw_value;
+ priv->cfp.freq = ch->center_freq;
+ priv->cfp.max_tx_power = ch->max_power;
+ cfp = &priv->cfp;
+ }
return cfp;
}
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 6e0a3eaecf70..07f6e0092552 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -67,7 +67,7 @@ mwifiex_get_cmd_node(struct mwifiex_adapter *adapter)
return NULL;
}
cmd_node = list_first_entry(&adapter->cmd_free_q,
- struct cmd_ctrl_node, list);
+ struct cmd_ctrl_node, list);
list_del(&cmd_node->list);
spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
@@ -158,8 +158,9 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
/* Set command sequence number */
adapter->seq_num++;
host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO
- (adapter->seq_num, cmd_node->priv->bss_num,
- cmd_node->priv->bss_type));
+ (adapter->seq_num,
+ cmd_node->priv->bss_num,
+ cmd_node->priv->bss_type));
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->curr_cmd = cmd_node;
@@ -174,8 +175,8 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
dev_dbg(adapter->dev, "cmd: DNLD_CMD: (%lu.%lu): %#x, act %#x, len %d,"
" seqno %#x\n",
tstamp.tv_sec, tstamp.tv_usec, cmd_code,
- le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size,
- le16_to_cpu(host_cmd->seq_num));
+ le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size,
+ le16_to_cpu(host_cmd->seq_num));
skb_push(cmd_node->cmd_skb, INTF_HEADER_LEN);
@@ -200,17 +201,17 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
/* Save the last command id and action to debug log */
adapter->dbg.last_cmd_index =
- (adapter->dbg.last_cmd_index + 1) % DBG_CMD_NUM;
+ (adapter->dbg.last_cmd_index + 1) % DBG_CMD_NUM;
adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index] = cmd_code;
adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index] =
- le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN));
+ le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN));
/* Clear BSS_NO_BITS from HostCmd */
cmd_code &= HostCmd_CMD_ID_MASK;
/* Setup the timer after transmit command */
mod_timer(&adapter->cmd_timer,
- jiffies + (MWIFIEX_TIMER_10S * HZ) / 1000);
+ jiffies + (MWIFIEX_TIMER_10S * HZ) / 1000);
return 0;
}
@@ -230,7 +231,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
struct mwifiex_private *priv;
struct mwifiex_opt_sleep_confirm *sleep_cfm_buf =
(struct mwifiex_opt_sleep_confirm *)
- adapter->sleep_cfm->data;
+ adapter->sleep_cfm->data;
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
sleep_cfm_buf->seq_num =
@@ -250,7 +251,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
return -1;
}
if (GET_BSS_ROLE(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY))
- == MWIFIEX_BSS_ROLE_STA) {
+ == MWIFIEX_BSS_ROLE_STA) {
if (!sleep_cfm_buf->resp_ctrl)
/* Response is not needed for sleep
confirm command */
@@ -258,12 +259,12 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
else
adapter->ps_state = PS_STATE_SLEEP_CFM;
- if (!sleep_cfm_buf->resp_ctrl
- && (adapter->is_hs_configured
- && !adapter->sleep_period.period)) {
+ if (!sleep_cfm_buf->resp_ctrl &&
+ (adapter->is_hs_configured &&
+ !adapter->sleep_period.period)) {
adapter->pm_wakeup_card_req = true;
- mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
- MWIFIEX_BSS_ROLE_STA), true);
+ mwifiex_hs_activated_event(mwifiex_get_priv
+ (adapter, MWIFIEX_BSS_ROLE_STA), true);
}
}
@@ -293,7 +294,7 @@ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter)
cmd_array = kzalloc(buf_size, GFP_KERNEL);
if (!cmd_array) {
dev_err(adapter->dev, "%s: failed to alloc cmd_array\n",
- __func__);
+ __func__);
return -ENOMEM;
}
@@ -376,9 +377,9 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
/* Save the last event to debug log */
adapter->dbg.last_event_index =
- (adapter->dbg.last_event_index + 1) % DBG_CMD_NUM;
+ (adapter->dbg.last_event_index + 1) % DBG_CMD_NUM;
adapter->dbg.last_event[adapter->dbg.last_event_index] =
- (u16) eventcause;
+ (u16) eventcause;
/* Get BSS number and corresponding priv */
priv = mwifiex_get_priv_by_id(adapter, EVENT_GET_BSS_NUM(eventcause),
@@ -391,13 +392,14 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
if (skb) {
rx_info = MWIFIEX_SKB_RXCB(skb);
- rx_info->bss_index = priv->bss_index;
+ rx_info->bss_num = priv->bss_num;
+ rx_info->bss_type = priv->bss_type;
}
if (eventcause != EVENT_PS_SLEEP && eventcause != EVENT_PS_AWAKE) {
do_gettimeofday(&tstamp);
dev_dbg(adapter->dev, "event: %lu.%lu: cause: %#x\n",
- tstamp.tv_sec, tstamp.tv_usec, eventcause);
+ tstamp.tv_sec, tstamp.tv_usec, eventcause);
}
ret = mwifiex_process_sta_event(priv);
@@ -508,7 +510,7 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
/* Return error, since the command preparation failed */
if (ret) {
dev_err(adapter->dev, "PREP_CMD: cmd %#x preparation failed\n",
- cmd_no);
+ cmd_no);
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
return -1;
}
@@ -576,9 +578,9 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
/* Exit_PS command needs to be queued in the header always. */
if (command == HostCmd_CMD_802_11_PS_MODE_ENH) {
struct host_cmd_ds_802_11_ps_mode_enh *pm =
- &host_cmd->params.psmode_enh;
- if ((le16_to_cpu(pm->action) == DIS_PS)
- || (le16_to_cpu(pm->action) == DIS_AUTO_PS)) {
+ &host_cmd->params.psmode_enh;
+ if ((le16_to_cpu(pm->action) == DIS_PS) ||
+ (le16_to_cpu(pm->action) == DIS_AUTO_PS)) {
if (adapter->ps_state != PS_STATE_AWAKE)
add_tail = false;
}
@@ -691,7 +693,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) {
resp = (struct host_cmd_ds_command *) adapter->upld_buf;
dev_err(adapter->dev, "CMD_RESP: NULL curr_cmd, %#x\n",
- le16_to_cpu(resp->command));
+ le16_to_cpu(resp->command));
return -1;
}
@@ -700,7 +702,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
dev_err(adapter->dev, "CMD_RESP: %#x been canceled\n",
- le16_to_cpu(resp->command));
+ le16_to_cpu(resp->command));
mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->curr_cmd = NULL;
@@ -724,8 +726,8 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
/* Get BSS number and corresponding priv */
priv = mwifiex_get_priv_by_id(adapter,
- HostCmd_GET_BSS_NO(le16_to_cpu(resp->seq_num)),
- HostCmd_GET_BSS_TYPE(le16_to_cpu(resp->seq_num)));
+ HostCmd_GET_BSS_NO(le16_to_cpu(resp->seq_num)),
+ HostCmd_GET_BSS_TYPE(le16_to_cpu(resp->seq_num)));
if (!priv)
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
/* Clear RET_BIT from HostCmd */
@@ -736,9 +738,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
/* Save the last command response to debug log */
adapter->dbg.last_cmd_resp_index =
- (adapter->dbg.last_cmd_resp_index + 1) % DBG_CMD_NUM;
+ (adapter->dbg.last_cmd_resp_index + 1) % DBG_CMD_NUM;
adapter->dbg.last_cmd_resp_id[adapter->dbg.last_cmd_resp_index] =
- orig_cmdresp_no;
+ orig_cmdresp_no;
do_gettimeofday(&tstamp);
dev_dbg(adapter->dev, "cmd: CMD_RESP: (%lu.%lu): 0x%x, result %d,"
@@ -760,8 +762,8 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
adapter->curr_cmd->cmd_flag &= ~CMD_F_HOSTCMD;
- if ((cmdresp_result == HostCmd_RESULT_OK)
- && (cmdresp_no == HostCmd_CMD_802_11_HS_CFG_ENH))
+ if ((cmdresp_result == HostCmd_RESULT_OK) &&
+ (cmdresp_no == HostCmd_CMD_802_11_HS_CFG_ENH))
ret = mwifiex_ret_802_11_hs_cfg(priv, resp);
} else {
/* handle response */
@@ -770,7 +772,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
/* Check init command response */
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
- if (ret == -1) {
+ if (ret) {
dev_err(adapter->dev, "%s: cmd %#x failed during "
"initialization\n", __func__, cmdresp_no);
mwifiex_init_fw_complete(adapter);
@@ -780,10 +782,8 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
}
if (adapter->curr_cmd) {
- if (adapter->curr_cmd->wait_q_enabled && (!ret))
- adapter->cmd_wait_q.status = 0;
- else if (adapter->curr_cmd->wait_q_enabled && (ret == -1))
- adapter->cmd_wait_q.status = -1;
+ if (adapter->curr_cmd->wait_q_enabled)
+ adapter->cmd_wait_q.status = ret;
/* Clean up and put current command back to cmd_free_q */
mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
@@ -825,44 +825,45 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
adapter->dbg.timeout_cmd_act =
adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index];
do_gettimeofday(&tstamp);
- dev_err(adapter->dev, "%s: Timeout cmd id (%lu.%lu) = %#x,"
- " act = %#x\n", __func__,
- tstamp.tv_sec, tstamp.tv_usec,
- adapter->dbg.timeout_cmd_id,
- adapter->dbg.timeout_cmd_act);
+ dev_err(adapter->dev,
+ "%s: Timeout cmd id (%lu.%lu) = %#x, act = %#x\n",
+ __func__, tstamp.tv_sec, tstamp.tv_usec,
+ adapter->dbg.timeout_cmd_id,
+ adapter->dbg.timeout_cmd_act);
dev_err(adapter->dev, "num_data_h2c_failure = %d\n",
- adapter->dbg.num_tx_host_to_card_failure);
+ adapter->dbg.num_tx_host_to_card_failure);
dev_err(adapter->dev, "num_cmd_h2c_failure = %d\n",
- adapter->dbg.num_cmd_host_to_card_failure);
+ adapter->dbg.num_cmd_host_to_card_failure);
dev_err(adapter->dev, "num_cmd_timeout = %d\n",
- adapter->dbg.num_cmd_timeout);
+ adapter->dbg.num_cmd_timeout);
dev_err(adapter->dev, "num_tx_timeout = %d\n",
- adapter->dbg.num_tx_timeout);
+ adapter->dbg.num_tx_timeout);
dev_err(adapter->dev, "last_cmd_index = %d\n",
- adapter->dbg.last_cmd_index);
+ adapter->dbg.last_cmd_index);
print_hex_dump_bytes("last_cmd_id: ", DUMP_PREFIX_OFFSET,
- adapter->dbg.last_cmd_id, DBG_CMD_NUM);
+ adapter->dbg.last_cmd_id, DBG_CMD_NUM);
print_hex_dump_bytes("last_cmd_act: ", DUMP_PREFIX_OFFSET,
- adapter->dbg.last_cmd_act, DBG_CMD_NUM);
+ adapter->dbg.last_cmd_act, DBG_CMD_NUM);
dev_err(adapter->dev, "last_cmd_resp_index = %d\n",
- adapter->dbg.last_cmd_resp_index);
+ adapter->dbg.last_cmd_resp_index);
print_hex_dump_bytes("last_cmd_resp_id: ", DUMP_PREFIX_OFFSET,
- adapter->dbg.last_cmd_resp_id, DBG_CMD_NUM);
+ adapter->dbg.last_cmd_resp_id,
+ DBG_CMD_NUM);
dev_err(adapter->dev, "last_event_index = %d\n",
- adapter->dbg.last_event_index);
+ adapter->dbg.last_event_index);
print_hex_dump_bytes("last_event: ", DUMP_PREFIX_OFFSET,
- adapter->dbg.last_event, DBG_CMD_NUM);
+ adapter->dbg.last_event, DBG_CMD_NUM);
dev_err(adapter->dev, "data_sent=%d cmd_sent=%d\n",
- adapter->data_sent, adapter->cmd_sent);
+ adapter->data_sent, adapter->cmd_sent);
dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n",
- adapter->ps_mode, adapter->ps_state);
+ adapter->ps_mode, adapter->ps_state);
}
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
mwifiex_init_fw_complete(adapter);
@@ -943,7 +944,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
uint16_t cancel_scan_cmd = false;
if ((adapter->curr_cmd) &&
- (adapter->curr_cmd->wait_q_enabled)) {
+ (adapter->curr_cmd->wait_q_enabled)) {
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
cmd_node = adapter->curr_cmd;
cmd_node->wait_q_enabled = false;
@@ -997,9 +998,9 @@ mwifiex_check_ps_cond(struct mwifiex_adapter *adapter)
else
dev_dbg(adapter->dev,
"cmd: Delay Sleep Confirm (%s%s%s)\n",
- (adapter->cmd_sent) ? "D" : "",
- (adapter->curr_cmd) ? "C" : "",
- (IS_CARD_RX_RCVD(adapter)) ? "R" : "");
+ (adapter->cmd_sent) ? "D" : "",
+ (adapter->curr_cmd) ? "C" : "",
+ (IS_CARD_RX_RCVD(adapter)) ? "R" : "");
}
/*
@@ -1051,8 +1052,8 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
dev_dbg(adapter->dev, "cmd: CMD_RESP: HS_CFG cmd reply"
" result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n",
resp->result, conditions,
- phs_cfg->params.hs_config.gpio,
- phs_cfg->params.hs_config.gap);
+ phs_cfg->params.hs_config.gpio,
+ phs_cfg->params.hs_config.gap);
}
if (conditions != HOST_SLEEP_CFG_CANCEL) {
adapter->is_hs_configured = true;
@@ -1079,7 +1080,8 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
adapter->hs_activated = false;
adapter->is_hs_configured = false;
mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
- MWIFIEX_BSS_ROLE_ANY), false);
+ MWIFIEX_BSS_ROLE_ANY),
+ false);
}
/*
@@ -1115,22 +1117,24 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
command &= HostCmd_CMD_ID_MASK;
if (command != HostCmd_CMD_802_11_PS_MODE_ENH) {
- dev_err(adapter->dev, "%s: received unexpected response for"
- " cmd %x, result = %x\n", __func__, command, result);
+ dev_err(adapter->dev,
+ "%s: rcvd unexpected resp for cmd %#x, result = %x\n",
+ __func__, command, result);
return;
}
if (result) {
dev_err(adapter->dev, "%s: sleep confirm cmd failed\n",
- __func__);
+ __func__);
adapter->pm_wakeup_card_req = false;
adapter->ps_state = PS_STATE_AWAKE;
return;
}
adapter->pm_wakeup_card_req = true;
if (adapter->is_hs_configured)
- mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
- MWIFIEX_BSS_ROLE_ANY), true);
+ mwifiex_hs_activated_event(mwifiex_get_priv
+ (adapter, MWIFIEX_BSS_ROLE_ANY),
+ true);
adapter->ps_state = PS_STATE_SLEEP;
cmd->command = cpu_to_le16(command);
cmd->seq_num = cpu_to_le16(seq_num);
@@ -1164,17 +1168,17 @@ int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
psmode_enh->action = cpu_to_le16(DIS_AUTO_PS);
psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
cmd->size = cpu_to_le16(S_DS_GEN + sizeof(psmode_enh->action) +
- sizeof(psmode_enh->params.ps_bitmap));
+ sizeof(psmode_enh->params.ps_bitmap));
} else if (cmd_action == GET_PS) {
psmode_enh->action = cpu_to_le16(GET_PS);
psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
cmd->size = cpu_to_le16(S_DS_GEN + sizeof(psmode_enh->action) +
- sizeof(psmode_enh->params.ps_bitmap));
+ sizeof(psmode_enh->params.ps_bitmap));
} else if (cmd_action == EN_AUTO_PS) {
psmode_enh->action = cpu_to_le16(EN_AUTO_PS);
psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
cmd_size = S_DS_GEN + sizeof(psmode_enh->action) +
- sizeof(psmode_enh->params.ps_bitmap);
+ sizeof(psmode_enh->params.ps_bitmap);
tlv = (u8 *) cmd + cmd_size;
if (ps_bitmap & BITMAP_STA_PS) {
struct mwifiex_adapter *adapter = priv->adapter;
@@ -1188,19 +1192,18 @@ int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
tlv += sizeof(*ps_tlv);
dev_dbg(adapter->dev, "cmd: PS Command: Enter PS\n");
ps_mode->null_pkt_interval =
- cpu_to_le16(adapter->null_pkt_interval);
+ cpu_to_le16(adapter->null_pkt_interval);
ps_mode->multiple_dtims =
- cpu_to_le16(adapter->multiple_dtim);
+ cpu_to_le16(adapter->multiple_dtim);
ps_mode->bcn_miss_timeout =
- cpu_to_le16(adapter->bcn_miss_time_out);
+ cpu_to_le16(adapter->bcn_miss_time_out);
ps_mode->local_listen_interval =
cpu_to_le16(adapter->local_listen_interval);
ps_mode->adhoc_wake_period =
cpu_to_le16(adapter->adhoc_awake_period);
ps_mode->delay_to_ps =
- cpu_to_le16(adapter->delay_to_ps);
- ps_mode->mode =
- cpu_to_le16(adapter->enhanced_ps_mode);
+ cpu_to_le16(adapter->delay_to_ps);
+ ps_mode->mode = cpu_to_le16(adapter->enhanced_ps_mode);
}
if (ps_bitmap & BITMAP_AUTO_DS) {
@@ -1218,7 +1221,7 @@ int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
if (auto_ds)
idletime = auto_ds->idle_time;
dev_dbg(priv->adapter->dev,
- "cmd: PS Command: Enter Auto Deep Sleep\n");
+ "cmd: PS Command: Enter Auto Deep Sleep\n");
auto_ds_tlv->deep_sleep_timeout = cpu_to_le16(idletime);
}
cmd->size = cpu_to_le16(cmd_size);
@@ -1245,8 +1248,9 @@ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
uint16_t auto_ps_bitmap =
le16_to_cpu(ps_mode->params.ps_bitmap);
- dev_dbg(adapter->dev, "info: %s: PS_MODE cmd reply result=%#x action=%#X\n",
- __func__, resp->result, action);
+ dev_dbg(adapter->dev,
+ "info: %s: PS_MODE cmd reply result=%#x action=%#X\n",
+ __func__, resp->result, action);
if (action == EN_AUTO_PS) {
if (auto_ps_bitmap & BITMAP_AUTO_DS) {
dev_dbg(adapter->dev, "cmd: Enabled auto deep sleep\n");
@@ -1255,7 +1259,8 @@ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
if (auto_ps_bitmap & BITMAP_STA_PS) {
dev_dbg(adapter->dev, "cmd: Enabled STA power save\n");
if (adapter->sleep_period.period)
- dev_dbg(adapter->dev, "cmd: set to uapsd/pps mode\n");
+ dev_dbg(adapter->dev,
+ "cmd: set to uapsd/pps mode\n");
}
} else if (action == DIS_AUTO_PS) {
if (ps_bitmap & BITMAP_AUTO_DS) {
@@ -1374,12 +1379,13 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
adapter->number_of_antenna = le16_to_cpu(hw_spec->number_of_antenna);
dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n",
- adapter->fw_release_number);
+ adapter->fw_release_number);
dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n",
- hw_spec->permanent_addr);
- dev_dbg(adapter->dev, "info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n",
+ hw_spec->permanent_addr);
+ dev_dbg(adapter->dev,
+ "info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n",
le16_to_cpu(hw_spec->hw_if_version),
- le16_to_cpu(hw_spec->version));
+ le16_to_cpu(hw_spec->version));
if (priv->curr_addr[0] == 0xff)
memmove(priv->curr_addr, hw_spec->permanent_addr, ETH_ALEN);
@@ -1394,7 +1400,8 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
/* If it's unidentified region code, use the default (USA) */
if (i >= MWIFIEX_MAX_REGION_CODE) {
adapter->region_code = 0x10;
- dev_dbg(adapter->dev, "cmd: unknown region code, use default (USA)\n");
+ dev_dbg(adapter->dev,
+ "cmd: unknown region code, use default (USA)\n");
}
adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap);
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index ae17ce02a3d0..be5fd1652e53 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -91,18 +91,14 @@ struct mwifiex_fw_image {
u32 fw_len;
};
-struct mwifiex_802_11_ssid {
- u32 ssid_len;
- u8 ssid[IEEE80211_MAX_SSID_LEN];
-};
-
struct mwifiex_wait_queue {
wait_queue_head_t wait;
int status;
};
struct mwifiex_rxinfo {
- u8 bss_index;
+ u8 bss_num;
+ u8 bss_type;
struct sk_buff *parent;
u8 use_count;
};
@@ -110,7 +106,8 @@ struct mwifiex_rxinfo {
struct mwifiex_txinfo {
u32 status_code;
u8 flags;
- u8 bss_index;
+ u8 bss_num;
+ u8 bss_type;
};
enum mwifiex_wmm_ac_e {
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 51c5417c569c..e98fc5af73dc 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -86,11 +86,6 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
MWIFIEX_802_11_PRIV_FILTER_8021X_WEP
};
-enum MWIFIEX_802_11_WEP_STATUS {
- MWIFIEX_802_11_WEP_ENABLED,
- MWIFIEX_802_11_WEP_DISABLED,
-};
-
#define CAL_SNR(RSSI, NF) ((s16)((s16)(RSSI)-(s16)(NF)))
#define PROPRIETARY_TLV_BASE_ID 0x0100
@@ -122,8 +117,8 @@ enum MWIFIEX_802_11_WEP_STATUS {
#define BA_STREAM_NOT_ALLOWED 0xff
#define IS_11N_ENABLED(priv) ((priv->adapter->config_bands & BAND_GN || \
- priv->adapter->config_bands & BAND_AN) \
- && priv->curr_bss_params.bss_descriptor.bcn_ht_cap)
+ priv->adapter->config_bands & BAND_AN) && \
+ priv->curr_bss_params.bss_descriptor.bcn_ht_cap)
#define INITIATOR_BIT(DelBAParamSet) (((DelBAParamSet) &\
BIT(DELBA_INITIATOR_POS)) >> DELBA_INITIATOR_POS)
@@ -857,11 +852,6 @@ struct mwifiex_user_scan_chan {
u32 scan_time;
} __packed;
-struct mwifiex_user_scan_ssid {
- u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
- u8 max_len;
-} __packed;
-
struct mwifiex_user_scan_cfg {
/*
* BSS mode to be sent in the firmware command
@@ -872,8 +862,9 @@ struct mwifiex_user_scan_cfg {
u8 reserved;
/* BSSID filter sent in the firmware command to limit the results */
u8 specific_bssid[ETH_ALEN];
- /* SSID filter list used in the to limit the scan results */
- struct mwifiex_user_scan_ssid ssid_list[MWIFIEX_MAX_SSID_LIST_LENGTH];
+ /* SSID filter list used in the firmware to limit the scan results */
+ struct cfg80211_ssid *ssid_list;
+ u8 num_ssids;
/* Variable number (fixed maximum) of channels to scan up */
struct mwifiex_user_scan_chan chan_list[MWIFIEX_USER_SCAN_CHAN_MAX];
} __packed;
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 1d0ec57a0143..54bb4839b57c 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -35,28 +35,24 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_bss_prio_node *bss_prio;
+ struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
unsigned long flags;
bss_prio = kzalloc(sizeof(struct mwifiex_bss_prio_node), GFP_KERNEL);
if (!bss_prio) {
dev_err(adapter->dev, "%s: failed to alloc bss_prio\n",
- __func__);
+ __func__);
return -ENOMEM;
}
bss_prio->priv = priv;
INIT_LIST_HEAD(&bss_prio->list);
- if (!adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur)
- adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
- bss_prio;
-
- spin_lock_irqsave(&adapter->bss_prio_tbl[priv->bss_priority]
- .bss_prio_lock, flags);
- list_add_tail(&bss_prio->list,
- &adapter->bss_prio_tbl[priv->bss_priority]
- .bss_prio_head);
- spin_unlock_irqrestore(&adapter->bss_prio_tbl[priv->bss_priority]
- .bss_prio_lock, flags);
+ if (!tbl[priv->bss_priority].bss_prio_cur)
+ tbl[priv->bss_priority].bss_prio_cur = bss_prio;
+
+ spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
+ list_add_tail(&bss_prio->list, &tbl[priv->bss_priority].bss_prio_head);
+ spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);
return 0;
}
@@ -82,7 +78,7 @@ static int mwifiex_init_priv(struct mwifiex_private *priv)
priv->bcn_avg_factor = DEFAULT_BCN_AVG_FACTOR;
priv->data_avg_factor = DEFAULT_DATA_AVG_FACTOR;
- priv->sec_info.wep_status = MWIFIEX_802_11_WEP_DISABLED;
+ priv->sec_info.wep_enabled = 0;
priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM;
priv->sec_info.encryption_mode = 0;
for (i = 0; i < ARRAY_SIZE(priv->wep_key); i++)
@@ -157,13 +153,13 @@ static int mwifiex_allocate_adapter(struct mwifiex_adapter *adapter)
ret = mwifiex_alloc_cmd_buffer(adapter);
if (ret) {
dev_err(adapter->dev, "%s: failed to alloc cmd buffer\n",
- __func__);
+ __func__);
return -1;
}
adapter->sleep_cfm =
dev_alloc_skb(sizeof(struct mwifiex_opt_sleep_confirm)
- + INTF_HEADER_LEN);
+ + INTF_HEADER_LEN);
if (!adapter->sleep_cfm) {
dev_err(adapter->dev, "%s: failed to alloc sleep cfm"
@@ -280,6 +276,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->adhoc_awake_period = 0;
memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
adapter->arp_filter_size = 0;
+ adapter->channel_type = NL80211_CHAN_HT20;
}
/*
@@ -519,7 +516,7 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_bss_prio_node *bssprio_node, *tmp_node, **cur;
struct list_head *head;
- spinlock_t *lock;
+ spinlock_t *lock; /* bss priority lock */
unsigned long flags;
for (i = 0; i < adapter->priv_num; ++i) {
@@ -527,8 +524,9 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
cur = &adapter->bss_prio_tbl[i].bss_prio_cur;
lock = &adapter->bss_prio_tbl[i].bss_prio_lock;
dev_dbg(adapter->dev, "info: delete BSS priority table,"
- " index = %d, i = %d, head = %p, cur = %p\n",
- priv->bss_index, i, head, *cur);
+ " bss_type = %d, bss_num = %d, i = %d,"
+ " head = %p, cur = %p\n",
+ priv->bss_type, priv->bss_num, i, head, *cur);
if (*cur) {
spin_lock_irqsave(lock, flags);
if (list_empty(head)) {
@@ -636,7 +634,7 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
ret = adapter->if_ops.check_fw_status(adapter, poll_num);
if (!ret) {
dev_notice(adapter->dev,
- "WLAN FW already running! Skip FW download\n");
+ "WLAN FW already running! Skip FW download\n");
goto done;
}
poll_num = MAX_FIRMWARE_POLL_TRIES;
@@ -644,8 +642,7 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
/* Check if we are the winner for downloading FW */
if (!adapter->winner) {
dev_notice(adapter->dev,
- "Other interface already running!"
- " Skip FW download\n");
+ "Other intf already running! Skip FW download\n");
poll_num = MAX_MULTI_INTERFACE_POLL_TRIES;
goto poll_fw;
}
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index d5d81f1fe41c..7ca4e8234f3e 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -50,7 +50,7 @@ struct mwifiex_chan_freq {
};
struct mwifiex_ssid_bssid {
- struct mwifiex_802_11_ssid ssid;
+ struct cfg80211_ssid ssid;
u8 bssid[ETH_ALEN];
};
@@ -122,7 +122,7 @@ struct mwifiex_ver_ext {
struct mwifiex_bss_info {
u32 bss_mode;
- struct mwifiex_802_11_ssid ssid;
+ struct cfg80211_ssid ssid;
u32 bss_chan;
u32 region_code;
u32 media_connected;
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 0b0eb5efba9d..8f9382b9c3ca 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -52,8 +52,9 @@ mwifiex_cmd_append_generic_ie(struct mwifiex_private *priv, u8 **buffer)
* parameter buffer pointer.
*/
if (priv->gen_ie_buf_len) {
- dev_dbg(priv->adapter->dev, "info: %s: append generic %d to %p\n",
- __func__, priv->gen_ie_buf_len, *buffer);
+ dev_dbg(priv->adapter->dev,
+ "info: %s: append generic ie len %d to %p\n",
+ __func__, priv->gen_ie_buf_len, *buffer);
/* Wrap the generic IE buffer with a pass through TLV type */
ie_header.type = cpu_to_le16(TLV_TYPE_PASSTHROUGH);
@@ -123,8 +124,9 @@ mwifiex_cmd_append_tsf_tlv(struct mwifiex_private *priv, u8 **buffer,
memcpy(&tsf_val, bss_desc->time_stamp, sizeof(tsf_val));
- dev_dbg(priv->adapter->dev, "info: %s: TSF offset calc: %016llx - "
- "%016llx\n", __func__, tsf_val, bss_desc->network_tsf);
+ dev_dbg(priv->adapter->dev,
+ "info: %s: TSF offset calc: %016llx - %016llx\n",
+ __func__, tsf_val, bss_desc->network_tsf);
memcpy(*buffer, &tsf_val, sizeof(tsf_val));
*buffer += sizeof(tsf_val);
@@ -167,7 +169,7 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
}
dev_dbg(priv->adapter->dev, "info: Tx data rate set to %#x\n",
- priv->data_rate);
+ priv->data_rate);
if (!priv->is_data_rate_auto) {
while (*ptr) {
@@ -212,7 +214,7 @@ mwifiex_setup_rates_from_bssdesc(struct mwifiex_private *priv,
card_rates, card_rates_size)) {
*out_rates_size = 0;
dev_err(priv->adapter->dev, "%s: cannot get common rates\n",
- __func__);
+ __func__);
return -1;
}
@@ -248,7 +250,7 @@ mwifiex_cmd_append_wapi_ie(struct mwifiex_private *priv, u8 **buffer)
*/
if (priv->wapi_ie_len) {
dev_dbg(priv->adapter->dev, "cmd: append wapi ie %d to %p\n",
- priv->wapi_ie_len, *buffer);
+ priv->wapi_ie_len, *buffer);
/* Wrap the generic IE buffer with a pass through TLV type */
ie_header.type = cpu_to_le16(TLV_TYPE_WAPI_IE);
@@ -293,10 +295,10 @@ static int mwifiex_append_rsn_ie_wpa_wpa2(struct mwifiex_private *priv,
le16_to_cpu(rsn_ie_tlv->header.type) & 0x00FF);
rsn_ie_tlv->header.len = cpu_to_le16((u16) priv->wpa_ie[1]);
rsn_ie_tlv->header.len = cpu_to_le16(le16_to_cpu(rsn_ie_tlv->header.len)
- & 0x00FF);
+ & 0x00FF);
if (le16_to_cpu(rsn_ie_tlv->header.len) <= (sizeof(priv->wpa_ie) - 2))
memcpy(rsn_ie_tlv->rsn_ie, &priv->wpa_ie[2],
- le16_to_cpu(rsn_ie_tlv->header.len));
+ le16_to_cpu(rsn_ie_tlv->header.len));
else
return -1;
@@ -379,7 +381,7 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
ssid_tlv->header.type = cpu_to_le16(WLAN_EID_SSID);
ssid_tlv->header.len = cpu_to_le16((u16) bss_desc->ssid.ssid_len);
memcpy(ssid_tlv->ssid, bss_desc->ssid.ssid,
- le16_to_cpu(ssid_tlv->header.len));
+ le16_to_cpu(ssid_tlv->header.len));
pos += sizeof(ssid_tlv->header) + le16_to_cpu(ssid_tlv->header.len);
phy_tlv = (struct mwifiex_ie_types_phy_param_set *) pos;
@@ -411,13 +413,13 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
memcpy(rates_tlv->rates, rates, rates_size);
pos += sizeof(rates_tlv->header) + rates_size;
dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: rates size = %d\n",
- rates_size);
+ rates_size);
/* Add the Authentication type to be used for Auth frames */
auth_tlv = (struct mwifiex_ie_types_auth_type *) pos;
auth_tlv->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
auth_tlv->header.len = cpu_to_le16(sizeof(auth_tlv->auth_type));
- if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED)
+ if (priv->sec_info.wep_enabled)
auth_tlv->auth_type = cpu_to_le16(
(u16) priv->sec_info.authentication_mode);
else
@@ -425,12 +427,12 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
pos += sizeof(auth_tlv->header) + le16_to_cpu(auth_tlv->header.len);
- if (IS_SUPPORT_MULTI_BANDS(priv->adapter)
- && !(ISSUPP_11NENABLED(priv->adapter->fw_cap_info)
- && (!bss_desc->disable_11n)
- && (priv->adapter->config_bands & BAND_GN
- || priv->adapter->config_bands & BAND_AN)
- && (bss_desc->bcn_ht_cap)
+ if (IS_SUPPORT_MULTI_BANDS(priv->adapter) &&
+ !(ISSUPP_11NENABLED(priv->adapter->fw_cap_info) &&
+ (!bss_desc->disable_11n) &&
+ (priv->adapter->config_bands & BAND_GN ||
+ priv->adapter->config_bands & BAND_AN) &&
+ (bss_desc->bcn_ht_cap)
)
) {
/* Append a channel TLV for the channel the attempted AP was
@@ -445,13 +447,13 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
chan_tlv->chan_scan_param[0].chan_number =
(bss_desc->phy_param_set.ds_param_set.current_chan);
dev_dbg(priv->adapter->dev, "info: Assoc: TLV Chan = %d\n",
- chan_tlv->chan_scan_param[0].chan_number);
+ chan_tlv->chan_scan_param[0].chan_number);
chan_tlv->chan_scan_param[0].radio_type =
mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
dev_dbg(priv->adapter->dev, "info: Assoc: TLV Band = %d\n",
- chan_tlv->chan_scan_param[0].radio_type);
+ chan_tlv->chan_scan_param[0].radio_type);
pos += sizeof(chan_tlv->header) +
sizeof(struct mwifiex_chan_scan_param_set);
}
@@ -464,10 +466,10 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
return -1;
}
- if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info)
- && (!bss_desc->disable_11n)
- && (priv->adapter->config_bands & BAND_GN
- || priv->adapter->config_bands & BAND_AN))
+ if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info) &&
+ (!bss_desc->disable_11n) &&
+ (priv->adapter->config_bands & BAND_GN ||
+ priv->adapter->config_bands & BAND_AN))
mwifiex_cmd_append_11n_tlv(priv, bss_desc, &pos);
/* Append vendor specific IE TLV */
@@ -493,7 +495,7 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
tmp_cap &= CAPINFO_MASK;
dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
- tmp_cap, CAPINFO_MASK);
+ tmp_cap, CAPINFO_MASK);
assoc->cap_info_bitmap = cpu_to_le16(tmp_cap);
return 0;
@@ -573,19 +575,19 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
- sizeof(priv->assoc_rsp_buf));
+ sizeof(priv->assoc_rsp_buf));
memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
if (le16_to_cpu(assoc_rsp->status_code)) {
priv->adapter->dbg.num_cmd_assoc_failure++;
- dev_err(priv->adapter->dev, "ASSOC_RESP: association failed, "
- "status code = %d, error = 0x%x, a_id = 0x%x\n",
- le16_to_cpu(assoc_rsp->status_code),
- le16_to_cpu(assoc_rsp->cap_info_bitmap),
- le16_to_cpu(assoc_rsp->a_id));
+ dev_err(priv->adapter->dev,
+ "ASSOC_RESP: failed, status code=%d err=%#x a_id=%#x\n",
+ le16_to_cpu(assoc_rsp->status_code),
+ le16_to_cpu(assoc_rsp->cap_info_bitmap),
+ le16_to_cpu(assoc_rsp->a_id));
- ret = -1;
+ ret = le16_to_cpu(assoc_rsp->status_code);
goto done;
}
@@ -600,7 +602,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
bss_desc = priv->attempted_bss_desc;
dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: %s\n",
- bss_desc->ssid.ssid);
+ bss_desc->ssid.ssid);
/* Make a copy of current BSSID descriptor */
memcpy(&priv->curr_bss_params.bss_descriptor,
@@ -617,8 +619,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
else
priv->curr_bss_params.wmm_enabled = false;
- if ((priv->wmm_required || bss_desc->bcn_ht_cap)
- && priv->curr_bss_params.wmm_enabled)
+ if ((priv->wmm_required || bss_desc->bcn_ht_cap) &&
+ priv->curr_bss_params.wmm_enabled)
priv->wmm_enabled = true;
else
priv->wmm_enabled = false;
@@ -631,7 +633,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 1 : 0);
dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: curr_pkt_filter is %#x\n",
- priv->curr_pkt_filter);
+ priv->curr_pkt_filter);
if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
priv->wpa_is_gtk_set = false;
@@ -714,7 +716,7 @@ done:
int
mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
- struct mwifiex_802_11_ssid *req_ssid)
+ struct cfg80211_ssid *req_ssid)
{
int rsn_ie_len = 0;
struct mwifiex_adapter *adapter = priv->adapter;
@@ -755,7 +757,7 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len);
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: SSID = %s\n",
- adhoc_start->ssid);
+ adhoc_start->ssid);
memset(bss_desc->ssid.ssid, 0, IEEE80211_MAX_SSID_LEN);
memcpy(bss_desc->ssid.ssid, req_ssid->ssid, req_ssid->ssid_len);
@@ -777,12 +779,11 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
adhoc_start->phy_param_set.ds_param_set.element_id = DS_PARA_IE_ID;
adhoc_start->phy_param_set.ds_param_set.len = DS_PARA_IE_LEN;
- if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211
- (priv, adapter->adhoc_start_band, (u16)
- priv->adhoc_channel)) {
+ if (!mwifiex_get_cfp(priv, adapter->adhoc_start_band,
+ (u16) priv->adhoc_channel, 0)) {
struct mwifiex_chan_freq_power *cfp;
- cfp = mwifiex_get_cfp_by_band_and_channel_from_cfg80211(priv,
- adapter->adhoc_start_band, FIRST_VALID_CHANNEL);
+ cfp = mwifiex_get_cfp(priv, adapter->adhoc_start_band,
+ FIRST_VALID_CHANNEL, 0);
if (cfp)
priv->adhoc_channel = (u8) cfp->channel;
}
@@ -793,7 +794,7 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
}
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: creating ADHOC on channel %d\n",
- priv->adhoc_channel);
+ priv->adhoc_channel);
priv->curr_bss_params.bss_descriptor.channel = priv->adhoc_channel;
priv->curr_bss_params.band = adapter->adhoc_start_band;
@@ -814,7 +815,7 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
adhoc_start->ss_param_set.ibss_param_set.element_id = IBSS_PARA_IE_ID;
adhoc_start->ss_param_set.ibss_param_set.len = IBSS_PARA_IE_LEN;
adhoc_start->ss_param_set.ibss_param_set.atim_window
- = cpu_to_le16(priv->atim_window);
+ = cpu_to_le16(priv->atim_window);
memcpy(&bss_desc->ss_param_set, &adhoc_start->ss_param_set,
sizeof(union ieee_types_ss_param_set));
@@ -842,10 +843,10 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
if ((adapter->adhoc_start_band & BAND_G) &&
(priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) {
if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
- HostCmd_ACT_GEN_SET, 0,
- &priv->curr_pkt_filter)) {
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->curr_pkt_filter)) {
dev_err(adapter->dev,
- "ADHOC_S_CMD: G Protection config failed\n");
+ "ADHOC_S_CMD: G Protection config failed\n");
return -1;
}
}
@@ -861,8 +862,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
&adhoc_start->data_rate, priv->curr_bss_params.num_of_rates);
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n",
- adhoc_start->data_rate[0], adhoc_start->data_rate[1],
- adhoc_start->data_rate[2], adhoc_start->data_rate[3]);
+ adhoc_start->data_rate[0], adhoc_start->data_rate[1],
+ adhoc_start->data_rate[2], adhoc_start->data_rate[3]);
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
@@ -879,12 +880,12 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
(u8) priv->curr_bss_params.bss_descriptor.channel;
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Chan = %d\n",
- chan_tlv->chan_scan_param[0].chan_number);
+ chan_tlv->chan_scan_param[0].chan_number);
chan_tlv->chan_scan_param[0].radio_type
= mwifiex_band_to_radio_type(priv->curr_bss_params.band);
- if (adapter->adhoc_start_band & BAND_GN
- || adapter->adhoc_start_band & BAND_AN) {
+ if (adapter->adhoc_start_band & BAND_GN ||
+ adapter->adhoc_start_band & BAND_AN) {
if (adapter->sec_chan_offset ==
IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
chan_tlv->chan_scan_param[0].radio_type |=
@@ -895,7 +896,7 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
(IEEE80211_HT_PARAM_CHA_SEC_BELOW << 4);
}
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Band = %d\n",
- chan_tlv->chan_scan_param[0].radio_type);
+ chan_tlv->chan_scan_param[0].radio_type);
pos += sizeof(chan_tlv->header) +
sizeof(struct mwifiex_chan_scan_param_set);
cmd_append_size +=
@@ -926,15 +927,14 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
mwifiex_fill_cap_info(priv, radio_type, ht_cap);
pos += sizeof(struct mwifiex_ie_types_htcap);
- cmd_append_size +=
- sizeof(struct mwifiex_ie_types_htcap);
+ cmd_append_size += sizeof(struct mwifiex_ie_types_htcap);
/* Fill HT INFORMATION */
ht_info = (struct mwifiex_ie_types_htinfo *) pos;
memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo));
ht_info->header.type = cpu_to_le16(WLAN_EID_HT_INFORMATION);
ht_info->header.len =
- cpu_to_le16(sizeof(struct ieee80211_ht_info));
+ cpu_to_le16(sizeof(struct ieee80211_ht_info));
ht_info->ht_info.control_chan =
(u8) priv->curr_bss_params.bss_descriptor.channel;
@@ -948,12 +948,12 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
ht_info->ht_info.basic_set[0] = 0xff;
pos += sizeof(struct mwifiex_ie_types_htinfo);
cmd_append_size +=
- sizeof(struct mwifiex_ie_types_htinfo);
+ sizeof(struct mwifiex_ie_types_htinfo);
}
- cmd->size = cpu_to_le16((u16)
- (sizeof(struct host_cmd_ds_802_11_ad_hoc_start)
- + S_DS_GEN + cmd_append_size));
+ cmd->size =
+ cpu_to_le16((u16)(sizeof(struct host_cmd_ds_802_11_ad_hoc_start)
+ + S_DS_GEN + cmd_append_size));
if (adapter->adhoc_start_band == BAND_B)
tmp_cap &= ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
@@ -1006,10 +1006,10 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
curr_pkt_filter | HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON;
if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
- HostCmd_ACT_GEN_SET, 0,
- &curr_pkt_filter)) {
+ HostCmd_ACT_GEN_SET, 0,
+ &curr_pkt_filter)) {
dev_err(priv->adapter->dev,
- "ADHOC_J_CMD: G Protection config failed\n");
+ "ADHOC_J_CMD: G Protection config failed\n");
return -1;
}
}
@@ -1040,13 +1040,14 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
tmp_cap &= CAPINFO_MASK;
- dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: tmp_cap=%4X"
- " CAPINFO_MASK=%4lX\n", tmp_cap, CAPINFO_MASK);
+ dev_dbg(priv->adapter->dev,
+ "info: ADHOC_J_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
+ tmp_cap, CAPINFO_MASK);
/* Information on BSSID descriptor passed to FW */
- dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: BSSID = %pM, SSID = %s\n",
- adhoc_join->bss_descriptor.bssid,
- adhoc_join->bss_descriptor.ssid);
+ dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: BSSID=%pM, SSID='%s'\n",
+ adhoc_join->bss_descriptor.bssid,
+ adhoc_join->bss_descriptor.ssid);
for (i = 0; bss_desc->supported_rates[i] &&
i < MWIFIEX_SUPPORTED_RATES;
@@ -1069,8 +1070,7 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
priv->curr_bss_params.bss_descriptor.channel = bss_desc->channel;
priv->curr_bss_params.band = (u8) bss_desc->bss_band;
- if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED
- || priv->sec_info.wpa_enabled)
+ if (priv->sec_info.wep_enabled || priv->sec_info.wpa_enabled)
tmp_cap |= WLAN_CAPABILITY_PRIVACY;
if (IS_SUPPORT_MULTI_BANDS(priv->adapter)) {
@@ -1084,18 +1084,18 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
sizeof(struct mwifiex_chan_scan_param_set));
chan_tlv->chan_scan_param[0].chan_number =
(bss_desc->phy_param_set.ds_param_set.current_chan);
- dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Chan = %d\n",
- chan_tlv->chan_scan_param[0].chan_number);
+ dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Chan=%d\n",
+ chan_tlv->chan_scan_param[0].chan_number);
chan_tlv->chan_scan_param[0].radio_type =
mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
- dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Band = %d\n",
- chan_tlv->chan_scan_param[0].radio_type);
+ dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Band=%d\n",
+ chan_tlv->chan_scan_param[0].radio_type);
pos += sizeof(chan_tlv->header) +
- sizeof(struct mwifiex_chan_scan_param_set);
+ sizeof(struct mwifiex_chan_scan_param_set);
cmd_append_size += sizeof(chan_tlv->header) +
- sizeof(struct mwifiex_chan_scan_param_set);
+ sizeof(struct mwifiex_chan_scan_param_set);
}
if (priv->sec_info.wpa_enabled)
@@ -1112,9 +1112,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
cmd_append_size += mwifiex_cmd_append_vsie_tlv(priv,
MWIFIEX_VSIE_MASK_ADHOC, &pos);
- cmd->size = cpu_to_le16((u16)
- (sizeof(struct host_cmd_ds_802_11_ad_hoc_join)
- + S_DS_GEN + cmd_append_size));
+ cmd->size = cpu_to_le16
+ ((u16) (sizeof(struct host_cmd_ds_802_11_ad_hoc_join)
+ + S_DS_GEN + cmd_append_size));
adhoc_join->bss_descriptor.cap_info_bitmap = cpu_to_le16(tmp_cap);
@@ -1159,7 +1159,7 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
if (le16_to_cpu(resp->command) == HostCmd_CMD_802_11_AD_HOC_START) {
dev_dbg(priv->adapter->dev, "info: ADHOC_S_RESP %s\n",
- bss_desc->ssid.ssid);
+ bss_desc->ssid.ssid);
/* Update the created network descriptor with the new BSSID */
memcpy(bss_desc->mac_address,
@@ -1172,7 +1172,7 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
* If BSSID has changed use SSID to compare instead of BSSID
*/
dev_dbg(priv->adapter->dev, "info: ADHOC_J_RESP %s\n",
- bss_desc->ssid.ssid);
+ bss_desc->ssid.ssid);
/*
* Make a copy of current BSSID descriptor, only needed for
@@ -1186,9 +1186,9 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
}
dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: channel = %d\n",
- priv->adhoc_channel);
+ priv->adhoc_channel);
dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: BSSID = %pM\n",
- priv->curr_bss_params.bss_descriptor.mac_address);
+ priv->curr_bss_params.bss_descriptor.mac_address);
if (!netif_carrier_ok(priv->netdev))
netif_carrier_on(priv->netdev);
@@ -1246,14 +1246,14 @@ int mwifiex_associate(struct mwifiex_private *priv,
*/
int
mwifiex_adhoc_start(struct mwifiex_private *priv,
- struct mwifiex_802_11_ssid *adhoc_ssid)
+ struct cfg80211_ssid *adhoc_ssid)
{
dev_dbg(priv->adapter->dev, "info: Adhoc Channel = %d\n",
priv->adhoc_channel);
dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n",
- priv->curr_bss_params.bss_descriptor.channel);
+ priv->curr_bss_params.bss_descriptor.channel);
dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %d\n",
- priv->curr_bss_params.band);
+ priv->curr_bss_params.band);
return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_AD_HOC_START,
HostCmd_ACT_GEN_SET, 0, adhoc_ssid);
@@ -1269,13 +1269,13 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid =%s\n",
- priv->curr_bss_params.bss_descriptor.ssid.ssid);
+ priv->curr_bss_params.bss_descriptor.ssid.ssid);
dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid_len =%u\n",
- priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
+ priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
dev_dbg(priv->adapter->dev, "info: adhoc join: ssid =%s\n",
bss_desc->ssid.ssid);
dev_dbg(priv->adapter->dev, "info: adhoc join: ssid_len =%u\n",
- bss_desc->ssid.ssid_len);
+ bss_desc->ssid.ssid_len);
/* Check if the requested SSID is already joined */
if (priv->curr_bss_params.bss_descriptor.ssid.ssid_len &&
@@ -1289,9 +1289,9 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
}
dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n",
- priv->curr_bss_params.bss_descriptor.channel);
+ priv->curr_bss_params.bss_descriptor.channel);
dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %c\n",
- priv->curr_bss_params.band);
+ priv->curr_bss_params.band);
return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_AD_HOC_JOIN,
HostCmd_ACT_GEN_SET, 0, bss_desc);
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index b728f54451e4..9d1b3ca6334b 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -64,11 +64,10 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
adapter->priv_num = 0;
/* Allocate memory for private structure */
- adapter->priv[0] = kzalloc(sizeof(struct mwifiex_private),
- GFP_KERNEL);
+ adapter->priv[0] = kzalloc(sizeof(struct mwifiex_private), GFP_KERNEL);
if (!adapter->priv[0]) {
- dev_err(adapter->dev, "%s: failed to alloc priv[0]\n",
- __func__);
+ dev_err(adapter->dev,
+ "%s: failed to alloc priv[0]\n", __func__);
goto error;
}
@@ -169,8 +168,8 @@ process_start:
if ((adapter->ps_state == PS_STATE_SLEEP) &&
(adapter->pm_wakeup_card_req &&
!adapter->pm_wakeup_fw_try) &&
- (is_command_pending(adapter)
- || !mwifiex_wmm_lists_empty(adapter))) {
+ (is_command_pending(adapter) ||
+ !mwifiex_wmm_lists_empty(adapter))) {
adapter->pm_wakeup_fw_try = true;
adapter->if_ops.wakeup(adapter);
continue;
@@ -187,10 +186,10 @@ process_start:
adapter->tx_lock_flag)
break;
- if (adapter->scan_processing || adapter->data_sent
- || mwifiex_wmm_lists_empty(adapter)) {
- if (adapter->cmd_sent || adapter->curr_cmd
- || (!is_command_pending(adapter)))
+ if (adapter->scan_processing || adapter->data_sent ||
+ mwifiex_wmm_lists_empty(adapter)) {
+ if (adapter->cmd_sent || adapter->curr_cmd ||
+ (!is_command_pending(adapter)))
break;
}
}
@@ -223,10 +222,10 @@ process_start:
/*
 * The ps_state may have been changed during processing of
* Sleep Request event.
*/
- if ((adapter->ps_state == PS_STATE_SLEEP)
- || (adapter->ps_state == PS_STATE_PRE_SLEEP)
- || (adapter->ps_state == PS_STATE_SLEEP_CFM)
- || adapter->tx_lock_flag)
+ if ((adapter->ps_state == PS_STATE_SLEEP) ||
+ (adapter->ps_state == PS_STATE_PRE_SLEEP) ||
+ (adapter->ps_state == PS_STATE_SLEEP_CFM) ||
+ adapter->tx_lock_flag)
continue;
if (!adapter->cmd_sent && !adapter->curr_cmd) {
@@ -249,8 +248,8 @@ process_start:
}
if (adapter->delay_null_pkt && !adapter->cmd_sent &&
- !adapter->curr_cmd && !is_command_pending(adapter)
- && mwifiex_wmm_lists_empty(adapter)) {
+ !adapter->curr_cmd && !is_command_pending(adapter) &&
+ mwifiex_wmm_lists_empty(adapter)) {
if (!mwifiex_send_null_packet
(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET |
@@ -371,7 +370,7 @@ mwifiex_fill_buffer(struct sk_buff *skb)
iph = ip_hdr(skb);
tid = IPTOS_PREC(iph->tos);
pr_debug("data: packet type ETH_P_IP: %04x, tid=%#x prio=%#x\n",
- eth->h_proto, tid, skb->priority);
+ eth->h_proto, tid, skb->priority);
break;
case __constant_htons(ETH_P_ARP):
pr_debug("data: ARP packet: %04x\n", eth->h_proto);
@@ -424,8 +423,8 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct sk_buff *new_skb;
struct mwifiex_txinfo *tx_info;
- dev_dbg(priv->adapter->dev, "data: %lu BSS(%d): Data <= kernel\n",
- jiffies, priv->bss_index);
+ dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n",
+ jiffies, priv->bss_type, priv->bss_num);
if (priv->adapter->surprise_removed) {
kfree_skb(skb);
@@ -441,7 +440,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
dev_dbg(priv->adapter->dev,
"data: Tx: insufficient skb headroom %d\n",
- skb_headroom(skb));
+ skb_headroom(skb));
/* Insufficient skb headroom - allocate a new skb */
new_skb =
skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
@@ -454,14 +453,15 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
kfree_skb(skb);
skb = new_skb;
dev_dbg(priv->adapter->dev, "info: new skb headroomd %d\n",
- skb_headroom(skb));
+ skb_headroom(skb));
}
tx_info = MWIFIEX_SKB_TXCB(skb);
- tx_info->bss_index = priv->bss_index;
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
mwifiex_fill_buffer(skb);
- mwifiex_wmm_add_buf_txqueue(priv->adapter, skb);
+ mwifiex_wmm_add_buf_txqueue(priv, skb);
atomic_inc(&priv->adapter->tx_pending);
if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
@@ -493,8 +493,8 @@ mwifiex_set_mac_address(struct net_device *dev, void *addr)
if (!ret)
memcpy(priv->netdev->dev_addr, priv->curr_addr, ETH_ALEN);
else
- dev_err(priv->adapter->dev, "set mac address failed: ret=%d"
- "\n", ret);
+ dev_err(priv->adapter->dev,
+ "set mac address failed: ret=%d\n", ret);
memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
@@ -531,8 +531,8 @@ mwifiex_tx_timeout(struct net_device *dev)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_index=%d\n",
- jiffies, priv->bss_index);
+ dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_type-num = %d-%d\n",
+ jiffies, priv->bss_type, priv->bss_num);
mwifiex_set_trans_start(dev);
priv->num_tx_timeout++;
}
@@ -605,18 +605,6 @@ int is_command_pending(struct mwifiex_adapter *adapter)
}
/*
- * This function returns the correct private structure pointer based
- * upon the BSS number.
- */
-struct mwifiex_private *
-mwifiex_bss_index_to_priv(struct mwifiex_adapter *adapter, u8 bss_index)
-{
- if (!adapter || (bss_index >= adapter->priv_num))
- return NULL;
- return adapter->priv[bss_index];
-}
-
-/*
* This is the main work queue function.
*
* It handles the main process, which in turn handles the complete
@@ -715,7 +703,7 @@ mwifiex_add_card(void *card, struct semaphore *sem,
rtnl_lock();
/* Create station interface by default */
if (!mwifiex_add_virtual_intf(priv->wdev->wiphy, "mlan%d",
- NL80211_IFTYPE_STATION, NULL, NULL)) {
+ NL80211_IFTYPE_STATION, NULL, NULL)) {
rtnl_unlock();
dev_err(adapter->dev, "cannot create default station"
" interface\n");
@@ -792,7 +780,7 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
if (priv && priv->netdev) {
if (!netif_queue_stopped(priv->netdev))
mwifiex_stop_net_dev_queue(priv->netdev,
- adapter);
+ adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
}
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 3186aa437f42..35225e9b1080 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -217,8 +217,9 @@ struct mwifiex_802_11_security {
u8 wpa2_enabled;
u8 wapi_enabled;
u8 wapi_key_on;
- enum MWIFIEX_802_11_WEP_STATUS wep_status;
+ u8 wep_enabled;
u32 authentication_mode;
+ u8 is_authtype_auto;
u32 encryption_mode;
};
@@ -243,7 +244,7 @@ struct ieee_types_generic {
struct mwifiex_bssdescriptor {
u8 mac_address[ETH_ALEN];
- struct mwifiex_802_11_ssid ssid;
+ struct cfg80211_ssid ssid;
u32 privacy;
s32 rssi;
u32 channel;
@@ -352,7 +353,6 @@ struct mwifiex_private;
struct mwifiex_private {
struct mwifiex_adapter *adapter;
- u8 bss_index;
u8 bss_type;
u8 bss_role;
u8 bss_priority;
@@ -388,10 +388,11 @@ struct mwifiex_private {
s16 bcn_rssi_avg;
s16 bcn_nf_avg;
struct mwifiex_bssdescriptor *attempted_bss_desc;
- struct mwifiex_802_11_ssid prev_ssid;
+ struct cfg80211_ssid prev_ssid;
u8 prev_bssid[ETH_ALEN];
struct mwifiex_current_bss_params curr_bss_params;
u16 beacon_period;
+ u8 dtim_period;
u16 listen_interval;
u16 atim_window;
u8 adhoc_channel;
@@ -461,9 +462,9 @@ struct mwifiex_private {
};
enum mwifiex_ba_status {
- BA_STREAM_NOT_SETUP = 0,
- BA_STREAM_SETUP_INPROGRESS,
- BA_STREAM_SETUP_COMPLETE
+ BA_SETUP_NONE = 0,
+ BA_SETUP_INPROGRESS,
+ BA_SETUP_COMPLETE
};
struct mwifiex_tx_ba_stream_tbl {
@@ -746,8 +747,7 @@ void mwifiex_queue_scan_cmd(struct mwifiex_private *priv,
struct cmd_ctrl_node *cmd_node);
int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
-s32 mwifiex_ssid_cmp(struct mwifiex_802_11_ssid *ssid1,
- struct mwifiex_802_11_ssid *ssid2);
+s32 mwifiex_ssid_cmp(struct cfg80211_ssid *ssid1, struct cfg80211_ssid *ssid2);
int mwifiex_associate(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc);
int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
@@ -759,25 +759,20 @@ void mwifiex_reset_connect_state(struct mwifiex_private *priv);
u8 mwifiex_band_to_radio_type(u8 band);
int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac);
int mwifiex_adhoc_start(struct mwifiex_private *priv,
- struct mwifiex_802_11_ssid *adhoc_ssid);
+ struct cfg80211_ssid *adhoc_ssid);
int mwifiex_adhoc_join(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc);
int mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
- struct mwifiex_802_11_ssid *req_ssid);
+ struct cfg80211_ssid *req_ssid);
int mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
struct mwifiex_bssdescriptor *bss_desc);
int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
int mwifiex_cmd_802_11_bg_scan_query(struct host_cmd_ds_command *cmd);
-struct mwifiex_chan_freq_power *
- mwifiex_get_cfp_by_band_and_channel_from_cfg80211(
- struct mwifiex_private *priv,
- u8 band, u16 channel);
-struct mwifiex_chan_freq_power *mwifiex_get_cfp_by_band_and_freq_from_cfg80211(
- struct mwifiex_private *priv,
- u8 band, u32 freq);
+struct mwifiex_chan_freq_power *mwifiex_get_cfp(struct mwifiex_private *priv,
+ u8 band, u16 channel, u32 freq);
u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
u8 ht_info);
u32 mwifiex_find_freq_from_band_chan(u8, u8);
@@ -846,8 +841,8 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter,
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
- if ((adapter->priv[i]->bss_num == bss_num)
- && (adapter->priv[i]->bss_type == bss_type))
+ if ((adapter->priv[i]->bss_num == bss_num) &&
+ (adapter->priv[i]->bss_type == bss_type))
break;
}
}
@@ -884,8 +879,6 @@ mwifiex_netdev_get_priv(struct net_device *dev)
return (struct mwifiex_private *) (*(unsigned long *) netdev_priv(dev));
}
-struct mwifiex_private *mwifiex_bss_index_to_priv(struct mwifiex_adapter
- *adapter, u8 bss_index);
int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
u32 func_init_shutdown);
int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
@@ -899,7 +892,7 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
struct net_device *dev);
int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter);
int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
- struct mwifiex_802_11_ssid *req_ssid);
+ struct cfg80211_ssid *req_ssid);
int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type);
int mwifiex_enable_hs(struct mwifiex_adapter *adapter);
int mwifiex_disable_auto_ds(struct mwifiex_private *priv);
@@ -908,13 +901,12 @@ int mwifiex_get_signal_info(struct mwifiex_private *priv,
int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
struct mwifiex_rate_cfg *rate);
int mwifiex_request_scan(struct mwifiex_private *priv,
- struct mwifiex_802_11_ssid *req_ssid);
+ struct cfg80211_ssid *req_ssid);
int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
struct mwifiex_user_scan_cfg *scan_req);
-int mwifiex_change_adhoc_chan(struct mwifiex_private *priv, int channel);
int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
-int mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, int channel);
+int mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, u16 channel);
int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
int key_len, u8 key_index, int disable);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 405350940a45..5867facd415d 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -83,13 +83,11 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
struct pcie_service_card *card;
pr_debug("info: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
- pdev->vendor, pdev->device, pdev->revision);
+ pdev->vendor, pdev->device, pdev->revision);
card = kzalloc(sizeof(struct pcie_service_card), GFP_KERNEL);
- if (!card) {
- pr_err("%s: failed to alloc memory\n", __func__);
+ if (!card)
return -ENOMEM;
- }
card->dev = pdev;
@@ -110,6 +108,7 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
{
struct pcie_service_card *card;
struct mwifiex_adapter *adapter;
+ struct mwifiex_private *priv;
int i;
card = pci_get_drvdata(pdev);
@@ -128,16 +127,15 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
for (i = 0; i < adapter->priv_num; i++)
if ((GET_BSS_ROLE(adapter->priv[i]) ==
- MWIFIEX_BSS_ROLE_STA) &&
- adapter->priv[i]->media_connected)
+ MWIFIEX_BSS_ROLE_STA) &&
+ adapter->priv[i]->media_connected)
mwifiex_deauthenticate(adapter->priv[i], NULL);
- mwifiex_disable_auto_ds(mwifiex_get_priv(adapter,
- MWIFIEX_BSS_ROLE_ANY));
+ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+
+ mwifiex_disable_auto_ds(priv);
- mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
- MWIFIEX_BSS_ROLE_ANY),
- MWIFIEX_FUNC_SHUTDOWN);
+ mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN);
}
mwifiex_remove_card(card->adapter, &add_remove_card_sem);
@@ -221,7 +219,7 @@ static int mwifiex_pcie_resume(struct pci_dev *pdev)
netif_carrier_on(adapter->priv[i]->netdev);
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
- MWIFIEX_ASYNC_CMD);
+ MWIFIEX_ASYNC_CMD);
return 0;
}
@@ -288,7 +286,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
while (mwifiex_pcie_ok_to_access_hw(adapter)) {
i++;
- udelay(10);
+ usleep_range(10, 20);
/* 50ms max wait */
if (i == 50000)
break;
@@ -380,26 +378,26 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
/* allocate shared memory for the BD ring and divide the same in to
several descriptors */
card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
- MWIFIEX_MAX_TXRX_BD;
+ MWIFIEX_MAX_TXRX_BD;
dev_dbg(adapter->dev, "info: txbd_ring: Allocating %d bytes\n",
- card->txbd_ring_size);
+ card->txbd_ring_size);
card->txbd_ring_vbase = kzalloc(card->txbd_ring_size, GFP_KERNEL);
if (!card->txbd_ring_vbase) {
- dev_err(adapter->dev, "Unable to allocate buffer for txbd ring.\n");
+ dev_err(adapter->dev, "Unable to alloc buffer for txbd ring\n");
return -ENOMEM;
}
card->txbd_ring_pbase = virt_to_phys(card->txbd_ring_vbase);
- dev_dbg(adapter->dev, "info: txbd_ring - base: %p, pbase: %#x:%x,"
- "len: %x\n", card->txbd_ring_vbase,
- (u32)card->txbd_ring_pbase,
- (u32)((u64)card->txbd_ring_pbase >> 32),
- card->txbd_ring_size);
+ dev_dbg(adapter->dev,
+ "info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n",
+ card->txbd_ring_vbase, (u32)card->txbd_ring_pbase,
+ (u32)((u64)card->txbd_ring_pbase >> 32), card->txbd_ring_size);
for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
card->txbd_ring[i] = (struct mwifiex_pcie_buf_desc *)
- (card->txbd_ring_vbase +
- (sizeof(struct mwifiex_pcie_buf_desc) * i));
+ (card->txbd_ring_vbase +
+ (sizeof(struct mwifiex_pcie_buf_desc)
+ * i));
/* Allocate buffer here so that firmware can DMA data from it */
skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
@@ -412,10 +410,9 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
skb_put(skb, MWIFIEX_RX_DATA_BUF_SIZE);
dev_dbg(adapter->dev, "info: TX ring: add new skb base: %p, "
- "buf_base: %p, buf_pbase: %#x:%x, "
- "buf_len: %#x\n", skb, skb->data,
- (u32)*buf_pa, (u32)(((u64)*buf_pa >> 32)),
- skb->len);
+ "buf_base: %p, buf_pbase: %#x:%x, buf_len: %#x\n",
+ skb, skb->data, (u32)*buf_pa,
+ (u32)(((u64)*buf_pa >> 32)), skb->len);
card->tx_buf_list[i] = skb;
card->txbd_ring[i]->paddr = *buf_pa;
@@ -469,9 +466,9 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
card->rxbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND;
card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
- MWIFIEX_MAX_TXRX_BD;
+ MWIFIEX_MAX_TXRX_BD;
dev_dbg(adapter->dev, "info: rxbd_ring: Allocating %d bytes\n",
- card->rxbd_ring_size);
+ card->rxbd_ring_size);
card->rxbd_ring_vbase = kzalloc(card->rxbd_ring_size, GFP_KERNEL);
if (!card->rxbd_ring_vbase) {
dev_err(adapter->dev, "Unable to allocate buffer for "
@@ -480,21 +477,23 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
}
card->rxbd_ring_pbase = virt_to_phys(card->rxbd_ring_vbase);
- dev_dbg(adapter->dev, "info: rxbd_ring - base: %p, pbase: %#x:%x,"
- "len: %#x\n", card->rxbd_ring_vbase,
- (u32)card->rxbd_ring_pbase,
- (u32)((u64)card->rxbd_ring_pbase >> 32),
- card->rxbd_ring_size);
+ dev_dbg(adapter->dev,
+ "info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n",
+ card->rxbd_ring_vbase, (u32)card->rxbd_ring_pbase,
+ (u32)((u64)card->rxbd_ring_pbase >> 32),
+ card->rxbd_ring_size);
for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
card->rxbd_ring[i] = (struct mwifiex_pcie_buf_desc *)
- (card->rxbd_ring_vbase +
- (sizeof(struct mwifiex_pcie_buf_desc) * i));
+ (card->rxbd_ring_vbase +
+ (sizeof(struct mwifiex_pcie_buf_desc)
+ * i));
/* Allocate skb here so that firmware can DMA data from it */
skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
if (!skb) {
- dev_err(adapter->dev, "Unable to allocate skb for RX ring.\n");
+ dev_err(adapter->dev,
+ "Unable to allocate skb for RX ring.\n");
kfree(card->rxbd_ring_vbase);
return -ENOMEM;
}
@@ -502,10 +501,9 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
skb_put(skb, MWIFIEX_RX_DATA_BUF_SIZE);
dev_dbg(adapter->dev, "info: RX ring: add new skb base: %p, "
- "buf_base: %p, buf_pbase: %#x:%x, "
- "buf_len: %#x\n", skb, skb->data,
- (u32)*buf_pa, (u32)((u64)*buf_pa >> 32),
- skb->len);
+ "buf_base: %p, buf_pbase: %#x:%x, buf_len: %#x\n",
+ skb, skb->data, (u32)*buf_pa, (u32)((u64)*buf_pa >> 32),
+ skb->len);
card->rx_buf_list[i] = skb;
card->rxbd_ring[i]->paddr = *buf_pa;
@@ -562,32 +560,34 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
card->evtbd_rdptr |= MWIFIEX_BD_FLAG_ROLLOVER_IND;
card->evtbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
- MWIFIEX_MAX_EVT_BD;
+ MWIFIEX_MAX_EVT_BD;
dev_dbg(adapter->dev, "info: evtbd_ring: Allocating %d bytes\n",
- card->evtbd_ring_size);
+ card->evtbd_ring_size);
card->evtbd_ring_vbase = kzalloc(card->evtbd_ring_size, GFP_KERNEL);
if (!card->evtbd_ring_vbase) {
- dev_err(adapter->dev, "Unable to allocate buffer. "
- "Terminating download\n");
+ dev_err(adapter->dev,
+ "Unable to allocate buffer. Terminating download\n");
return -ENOMEM;
}
card->evtbd_ring_pbase = virt_to_phys(card->evtbd_ring_vbase);
- dev_dbg(adapter->dev, "info: CMDRSP/EVT bd_ring - base: %p, "
- "pbase: %#x:%x, len: %#x\n", card->evtbd_ring_vbase,
- (u32)card->evtbd_ring_pbase,
- (u32)((u64)card->evtbd_ring_pbase >> 32),
- card->evtbd_ring_size);
+ dev_dbg(adapter->dev,
+ "info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n",
+ card->evtbd_ring_vbase, (u32)card->evtbd_ring_pbase,
+ (u32)((u64)card->evtbd_ring_pbase >> 32),
+ card->evtbd_ring_size);
for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) {
card->evtbd_ring[i] = (struct mwifiex_pcie_buf_desc *)
- (card->evtbd_ring_vbase +
- (sizeof(struct mwifiex_pcie_buf_desc) * i));
+ (card->evtbd_ring_vbase +
+ (sizeof(struct mwifiex_pcie_buf_desc)
+ * i));
/* Allocate skb here so that firmware can DMA data from it */
skb = dev_alloc_skb(MAX_EVENT_SIZE);
if (!skb) {
- dev_err(adapter->dev, "Unable to allocate skb for EVENT buf.\n");
+ dev_err(adapter->dev,
+ "Unable to allocate skb for EVENT buf.\n");
kfree(card->evtbd_ring_vbase);
return -ENOMEM;
}
@@ -595,10 +595,9 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
skb_put(skb, MAX_EVENT_SIZE);
dev_dbg(adapter->dev, "info: Evt ring: add new skb. base: %p, "
- "buf_base: %p, buf_pbase: %#x:%x, "
- "buf_len: %#x\n", skb, skb->data,
- (u32)*buf_pa, (u32)((u64)*buf_pa >> 32),
- skb->len);
+ "buf_base: %p, buf_pbase: %#x:%x, buf_len: %#x\n",
+ skb, skb->data, (u32)*buf_pa, (u32)((u64)*buf_pa >> 32),
+ skb->len);
card->evt_buf_list[i] = skb;
card->evtbd_ring[i]->paddr = *buf_pa;
@@ -647,8 +646,8 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
/* Allocate memory for receiving command response data */
skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE);
if (!skb) {
- dev_err(adapter->dev, "Unable to allocate skb for command "
- "response data.\n");
+ dev_err(adapter->dev,
+ "Unable to allocate skb for command response data.\n");
return -ENOMEM;
}
mwifiex_update_sk_buff_pa(skb);
@@ -659,8 +658,8 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
/* Allocate memory for sending command to firmware */
skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER);
if (!skb) {
- dev_err(adapter->dev, "Unable to allocate skb for command "
- "data.\n");
+ dev_err(adapter->dev,
+ "Unable to allocate skb for command data.\n");
return -ENOMEM;
}
mwifiex_update_sk_buff_pa(skb);
@@ -702,8 +701,8 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
/* Allocate memory for sleep cookie */
skb = dev_alloc_skb(sizeof(u32));
if (!skb) {
- dev_err(adapter->dev, "Unable to allocate skb for sleep "
- "cookie!\n");
+ dev_err(adapter->dev,
+ "Unable to allocate skb for sleep cookie!\n");
return -ENOMEM;
}
mwifiex_update_sk_buff_pa(skb);
@@ -713,7 +712,7 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
*(u32 *)skb->data = FW_AWAKE_COOKIE;
dev_dbg(adapter->dev, "alloc_scook: sleep cookie=0x%x\n",
- *((u32 *)skb->data));
+ *((u32 *)skb->data));
/* Save the sleep cookie */
card->sleep_cookie = skb;
@@ -757,15 +756,15 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb)
/* Read the TX ring read pointer set by firmware */
if (mwifiex_read_reg(adapter, REG_TXBD_RDPTR, &rdptr)) {
- dev_err(adapter->dev, "SEND DATA: failed to read "
- "REG_TXBD_RDPTR\n");
+ dev_err(adapter->dev,
+ "SEND DATA: failed to read REG_TXBD_RDPTR\n");
return -1;
}
wrindx = card->txbd_wrptr & MWIFIEX_TXBD_MASK;
dev_dbg(adapter->dev, "info: SEND DATA: <Rd: %#x, Wr: %#x>\n", rdptr,
- card->txbd_wrptr);
+ card->txbd_wrptr);
if (((card->txbd_wrptr & MWIFIEX_TXBD_MASK) !=
(rdptr & MWIFIEX_TXBD_MASK)) ||
((card->txbd_wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) !=
@@ -797,32 +796,31 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb)
/* Write the TX ring write pointer in to REG_TXBD_WRPTR */
if (mwifiex_write_reg(adapter, REG_TXBD_WRPTR,
- card->txbd_wrptr)) {
- dev_err(adapter->dev, "SEND DATA: failed to write "
- "REG_TXBD_WRPTR\n");
+ card->txbd_wrptr)) {
+ dev_err(adapter->dev,
+ "SEND DATA: failed to write REG_TXBD_WRPTR\n");
return 0;
}
/* Send the TX ready interrupt */
if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
CPU_INTR_DNLD_RDY)) {
- dev_err(adapter->dev, "SEND DATA: failed to assert "
- "door-bell interrupt.\n");
+ dev_err(adapter->dev,
+ "SEND DATA: failed to assert door-bell intr\n");
return -1;
}
dev_dbg(adapter->dev, "info: SEND DATA: Updated <Rd: %#x, Wr: "
- "%#x> and sent packet to firmware "
- "successfully\n", rdptr,
- card->txbd_wrptr);
+ "%#x> and sent packet to firmware successfully\n",
+ rdptr, card->txbd_wrptr);
} else {
- dev_dbg(adapter->dev, "info: TX Ring full, can't send anymore "
- "packets to firmware\n");
+ dev_dbg(adapter->dev,
+ "info: TX Ring full, can't send packets to fw\n");
adapter->data_sent = true;
/* Send the TX ready interrupt */
if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
CPU_INTR_DNLD_RDY))
- dev_err(adapter->dev, "SEND DATA: failed to assert "
- "door-bell interrupt\n");
+ dev_err(adapter->dev,
+ "SEND DATA: failed to assert door-bell intr\n");
return -EBUSY;
}
@@ -842,8 +840,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
/* Read the RX ring Write pointer set by firmware */
if (mwifiex_read_reg(adapter, REG_RXBD_WRPTR, &wrptr)) {
- dev_err(adapter->dev, "RECV DATA: failed to read "
- "REG_TXBD_RDPTR\n");
+ dev_err(adapter->dev,
+ "RECV DATA: failed to read REG_TXBD_RDPTR\n");
ret = -1;
goto done;
}
@@ -861,12 +859,13 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
/* Get data length from interface header -
first byte is len, second byte is type */
rx_len = *((u16 *)skb_data->data);
- dev_dbg(adapter->dev, "info: RECV DATA: Rd=%#x, Wr=%#x, "
- "Len=%d\n", card->rxbd_rdptr, wrptr, rx_len);
+ dev_dbg(adapter->dev,
+ "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n",
+ card->rxbd_rdptr, wrptr, rx_len);
skb_tmp = dev_alloc_skb(rx_len);
if (!skb_tmp) {
- dev_dbg(adapter->dev, "info: Failed to alloc skb "
- "for RX\n");
+ dev_dbg(adapter->dev,
+ "info: Failed to alloc skb for RX\n");
ret = -EBUSY;
goto done;
}
@@ -881,26 +880,26 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
MWIFIEX_BD_FLAG_ROLLOVER_IND);
}
dev_dbg(adapter->dev, "info: RECV DATA: <Rd: %#x, Wr: %#x>\n",
- card->rxbd_rdptr, wrptr);
+ card->rxbd_rdptr, wrptr);
/* Write the RX ring read pointer in to REG_RXBD_RDPTR */
if (mwifiex_write_reg(adapter, REG_RXBD_RDPTR,
card->rxbd_rdptr)) {
- dev_err(adapter->dev, "RECV DATA: failed to "
- "write REG_RXBD_RDPTR\n");
+ dev_err(adapter->dev,
+ "RECV DATA: failed to write REG_RXBD_RDPTR\n");
ret = -1;
goto done;
}
/* Read the RX ring Write pointer set by firmware */
if (mwifiex_read_reg(adapter, REG_RXBD_WRPTR, &wrptr)) {
- dev_err(adapter->dev, "RECV DATA: failed to read "
- "REG_TXBD_RDPTR\n");
+ dev_err(adapter->dev,
+ "RECV DATA: failed to read REG_TXBD_RDPTR\n");
ret = -1;
goto done;
}
- dev_dbg(adapter->dev, "info: RECV DATA: Received packet from "
- "firmware successfully\n");
+ dev_dbg(adapter->dev,
+ "info: RECV DATA: Rcvd packet from fw successfully\n");
mwifiex_handle_rx_packet(adapter, skb_tmp);
}
@@ -919,17 +918,19 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
phys_addr_t *buf_pa = MWIFIEX_SKB_PACB(skb);
if (!(skb->data && skb->len && *buf_pa)) {
- dev_err(adapter->dev, "Invalid parameter in %s <%p, %#x:%x, "
- "%x>\n", __func__, skb->data, skb->len,
- (u32)*buf_pa, (u32)((u64)*buf_pa >> 32));
+ dev_err(adapter->dev,
+ "Invalid parameter in %s <%p, %#x:%x, %x>\n",
+ __func__, skb->data, skb->len,
+ (u32)*buf_pa, (u32)((u64)*buf_pa >> 32));
return -1;
}
/* Write the lower 32bits of the physical address to scratch
* register 0 */
if (mwifiex_write_reg(adapter, PCIE_SCRATCH_0_REG, (u32)*buf_pa)) {
- dev_err(adapter->dev, "%s: failed to write download command "
- "to boot code.\n", __func__);
+ dev_err(adapter->dev,
+ "%s: failed to write download command to boot code.\n",
+ __func__);
return -1;
}
@@ -937,23 +938,25 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
* register 1 */
if (mwifiex_write_reg(adapter, PCIE_SCRATCH_1_REG,
(u32)((u64)*buf_pa >> 32))) {
- dev_err(adapter->dev, "%s: failed to write download command "
- "to boot code.\n", __func__);
+ dev_err(adapter->dev,
+ "%s: failed to write download command to boot code.\n",
+ __func__);
return -1;
}
/* Write the command length to scratch register 2 */
if (mwifiex_write_reg(adapter, PCIE_SCRATCH_2_REG, skb->len)) {
- dev_err(adapter->dev, "%s: failed to write command length to "
- "scratch register 2\n", __func__);
+ dev_err(adapter->dev,
+ "%s: failed to write command len to scratch reg 2\n",
+ __func__);
return -1;
}
/* Ring the door bell */
if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
CPU_INTR_DOOR_BELL)) {
- dev_err(adapter->dev, "%s: failed to assert door-bell "
- "interrupt.\n", __func__);
+ dev_err(adapter->dev,
+ "%s: failed to assert door-bell intr\n", __func__);
return -1;
}
@@ -973,14 +976,14 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
if (!(skb->data && skb->len)) {
dev_err(adapter->dev, "Invalid parameter in %s <%p, %#x>\n",
- __func__, skb->data, skb->len);
+ __func__, skb->data, skb->len);
return -1;
}
/* Make sure a command response buffer is available */
if (!card->cmdrsp_buf) {
- dev_err(adapter->dev, "No response buffer available, send "
- "command failed\n");
+ dev_err(adapter->dev,
+ "No response buffer available, send command failed\n");
return -EBUSY;
}
@@ -1011,17 +1014,18 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
/* Write the lower 32bits of the cmdrsp buffer physical
address */
if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_LO,
- (u32)*cmdrsp_buf_pa)) {
- dev_err(adapter->dev, "Failed to write download command to boot code.\n");
+ (u32)*cmdrsp_buf_pa)) {
+ dev_err(adapter->dev,
+ "Failed to write download cmd to boot code.\n");
ret = -1;
goto done;
}
/* Write the upper 32bits of the cmdrsp buffer physical
address */
if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_HI,
- (u32)((u64)*cmdrsp_buf_pa >> 32))) {
- dev_err(adapter->dev, "Failed to write download command"
- " to boot code.\n");
+ (u32)((u64)*cmdrsp_buf_pa >> 32))) {
+ dev_err(adapter->dev,
+ "Failed to write download cmd to boot code.\n");
ret = -1;
goto done;
}
@@ -1029,27 +1033,25 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
cmd_buf_pa = MWIFIEX_SKB_PACB(card->cmd_buf);
/* Write the lower 32bits of the physical address to REG_CMD_ADDR_LO */
- if (mwifiex_write_reg(adapter, REG_CMD_ADDR_LO,
- (u32)*cmd_buf_pa)) {
- dev_err(adapter->dev, "Failed to write download command "
- "to boot code.\n");
+ if (mwifiex_write_reg(adapter, REG_CMD_ADDR_LO, (u32)*cmd_buf_pa)) {
+ dev_err(adapter->dev,
+ "Failed to write download cmd to boot code.\n");
ret = -1;
goto done;
}
/* Write the upper 32bits of the physical address to REG_CMD_ADDR_HI */
if (mwifiex_write_reg(adapter, REG_CMD_ADDR_HI,
- (u32)((u64)*cmd_buf_pa >> 32))) {
- dev_err(adapter->dev, "Failed to write download command "
- "to boot code.\n");
+ (u32)((u64)*cmd_buf_pa >> 32))) {
+ dev_err(adapter->dev,
+ "Failed to write download cmd to boot code.\n");
ret = -1;
goto done;
}
/* Write the command length to REG_CMD_SIZE */
- if (mwifiex_write_reg(adapter, REG_CMD_SIZE,
- card->cmd_buf->len)) {
- dev_err(adapter->dev, "Failed to write command length to "
- "REG_CMD_SIZE\n");
+ if (mwifiex_write_reg(adapter, REG_CMD_SIZE, card->cmd_buf->len)) {
+ dev_err(adapter->dev,
+ "Failed to write cmd len to REG_CMD_SIZE\n");
ret = -1;
goto done;
}
@@ -1057,8 +1059,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
/* Ring the door bell */
if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
CPU_INTR_DOOR_BELL)) {
- dev_err(adapter->dev, "Failed to assert door-bell "
- "interrupt.\n");
+ dev_err(adapter->dev,
+ "Failed to assert door-bell intr\n");
ret = -1;
goto done;
}
@@ -1076,30 +1078,29 @@ done:
static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
+ struct sk_buff *skb = card->cmdrsp_buf;
int count = 0;
dev_dbg(adapter->dev, "info: Rx CMD Response\n");
if (!adapter->curr_cmd) {
- skb_pull(card->cmdrsp_buf, INTF_HEADER_LEN);
+ skb_pull(skb, INTF_HEADER_LEN);
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
- mwifiex_process_sleep_confirm_resp(adapter,
- card->cmdrsp_buf->data,
- card->cmdrsp_buf->len);
+ mwifiex_process_sleep_confirm_resp(adapter, skb->data,
+ skb->len);
while (mwifiex_pcie_ok_to_access_hw(adapter) &&
(count++ < 10))
- udelay(50);
+ usleep_range(50, 60);
} else {
- dev_err(adapter->dev, "There is no command but "
- "got cmdrsp\n");
+ dev_err(adapter->dev,
+ "There is no command but got cmdrsp\n");
}
- memcpy(adapter->upld_buf, card->cmdrsp_buf->data,
- min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER,
- card->cmdrsp_buf->len));
- skb_push(card->cmdrsp_buf, INTF_HEADER_LEN);
+ memcpy(adapter->upld_buf, skb->data,
+ min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
+ skb_push(skb, INTF_HEADER_LEN);
} else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
- skb_pull(card->cmdrsp_buf, INTF_HEADER_LEN);
- adapter->curr_cmd->resp_skb = card->cmdrsp_buf;
+ skb_pull(skb, INTF_HEADER_LEN);
+ adapter->curr_cmd->resp_skb = skb;
adapter->cmd_resp_received = true;
/* Take the pointer and set it to CMD node and will
return in the response complete callback */
@@ -1109,15 +1110,15 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
will prevent firmware from writing to the same response
buffer again. */
if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_LO, 0)) {
- dev_err(adapter->dev, "cmd_done: failed to clear "
- "cmd_rsp address.\n");
+ dev_err(adapter->dev,
+ "cmd_done: failed to clear cmd_rsp_addr_lo\n");
return -1;
}
/* Write the upper 32bits of the cmdrsp buffer physical
address */
if (mwifiex_write_reg(adapter, REG_CMDRSP_ADDR_HI, 0)) {
- dev_err(adapter->dev, "cmd_done: failed to clear "
- "cmd_rsp address.\n");
+ dev_err(adapter->dev,
+ "cmd_done: failed to clear cmd_rsp_addr_hi\n");
return -1;
}
}
@@ -1151,8 +1152,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
u32 wrptr, event;
if (adapter->event_received) {
- dev_dbg(adapter->dev, "info: Event being processed, "\
- "do not process this interrupt just yet\n");
+ dev_dbg(adapter->dev, "info: Event being processed, "
+ "do not process this interrupt just yet\n");
return 0;
}
@@ -1163,14 +1164,15 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
/* Read the event ring write pointer set by firmware */
if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
- dev_err(adapter->dev, "EventReady: failed to read REG_EVTBD_WRPTR\n");
+ dev_err(adapter->dev,
+ "EventReady: failed to read REG_EVTBD_WRPTR\n");
return -1;
}
dev_dbg(adapter->dev, "info: EventReady: Initial <Rd: 0x%x, Wr: 0x%x>",
- card->evtbd_rdptr, wrptr);
- if (((wrptr & MWIFIEX_EVTBD_MASK) !=
- (card->evtbd_rdptr & MWIFIEX_EVTBD_MASK)) ||
+ card->evtbd_rdptr, wrptr);
+ if (((wrptr & MWIFIEX_EVTBD_MASK) != (card->evtbd_rdptr
+ & MWIFIEX_EVTBD_MASK)) ||
((wrptr & MWIFIEX_BD_FLAG_ROLLOVER_IND) ==
(card->evtbd_rdptr & MWIFIEX_BD_FLAG_ROLLOVER_IND))) {
struct sk_buff *skb_cmd;
@@ -1230,13 +1232,14 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
if (rdptr >= MWIFIEX_MAX_EVT_BD) {
dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n",
- rdptr);
+ rdptr);
return -EINVAL;
}
/* Read the event ring write pointer set by firmware */
if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
- dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_WRPTR\n");
+ dev_err(adapter->dev,
+ "event_complete: failed to read REG_EVTBD_WRPTR\n");
return -1;
}
@@ -1249,9 +1252,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
card->evtbd_ring[rdptr]->flags = 0;
skb = NULL;
} else {
- dev_dbg(adapter->dev, "info: ERROR: Buffer is still valid at "
- "index %d, <%p, %p>\n", rdptr,
- card->evt_buf_list[rdptr], skb);
+ dev_dbg(adapter->dev,
+ "info: ERROR: buf still valid at index %d, <%p, %p>\n",
+ rdptr, card->evt_buf_list[rdptr], skb);
}
if ((++card->evtbd_rdptr & MWIFIEX_EVTBD_MASK) == MWIFIEX_MAX_EVT_BD) {
@@ -1261,11 +1264,12 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
}
dev_dbg(adapter->dev, "info: Updated <Rd: 0x%x, Wr: 0x%x>",
- card->evtbd_rdptr, wrptr);
+ card->evtbd_rdptr, wrptr);
/* Write the event ring read pointer in to REG_EVTBD_RDPTR */
if (mwifiex_write_reg(adapter, REG_EVTBD_RDPTR, card->evtbd_rdptr)) {
- dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_RDPTR\n");
+ dev_err(adapter->dev,
+ "event_complete: failed to read REG_EVTBD_RDPTR\n");
return -1;
}
@@ -1299,17 +1303,17 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
}
if (!firmware || !firmware_len) {
- dev_err(adapter->dev, "No firmware image found! "
- "Terminating download\n");
+ dev_err(adapter->dev,
+ "No firmware image found! Terminating download\n");
return -1;
}
dev_dbg(adapter->dev, "info: Downloading FW image (%d bytes)\n",
- firmware_len);
+ firmware_len);
if (mwifiex_pcie_disable_host_int(adapter)) {
- dev_err(adapter->dev, "%s: Disabling interrupts"
- " failed.\n", __func__);
+ dev_err(adapter->dev,
+ "%s: Disabling interrupts failed.\n", __func__);
return -1;
}
@@ -1332,19 +1336,20 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
ret = mwifiex_read_reg(adapter, PCIE_SCRATCH_2_REG,
&len);
if (ret) {
- dev_warn(adapter->dev, "Failed reading length from boot code\n");
+ dev_warn(adapter->dev,
+ "Failed reading len from boot code\n");
goto done;
}
if (len)
break;
- udelay(10);
+ usleep_range(10, 20);
}
if (!len) {
break;
} else if (len > MWIFIEX_UPLD_SIZE) {
pr_err("FW download failure @ %d, invalid length %d\n",
- offset, len);
+ offset, len);
ret = -1;
goto done;
}
@@ -1360,8 +1365,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
goto done;
}
dev_err(adapter->dev, "FW CRC error indicated by the "
- "helper: len = 0x%04X, txlen = "
- "%d\n", len, txlen);
+ "helper: len = 0x%04X, txlen = %d\n",
+ len, txlen);
len &= ~BIT(0);
/* Setting this to 0 to resend from same offset */
txlen = 0;
@@ -1374,9 +1379,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
dev_dbg(adapter->dev, ".");
- tx_blocks =
- (txlen + MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD - 1) /
- MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD;
+ tx_blocks = (txlen +
+ MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD - 1) /
+ MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD;
/* Copy payload to buffer */
memmove(skb->data, &firmware[offset], txlen);
@@ -1387,7 +1392,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
/* Send the boot command to device */
if (mwifiex_pcie_send_boot_cmd(adapter, skb)) {
- dev_err(adapter->dev, "Failed to send firmware download command\n");
+ dev_err(adapter->dev,
+ "Failed to send firmware download command\n");
ret = -1;
goto done;
}
@@ -1396,8 +1402,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS,
&ireg_intr)) {
dev_err(adapter->dev, "%s: Failed to read "
- "interrupt status during "
- "fw dnld.\n", __func__);
+ "interrupt status during fw dnld.\n",
+ __func__);
ret = -1;
goto done;
}
@@ -1407,7 +1413,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
} while (true);
dev_dbg(adapter->dev, "info:\nFW download over, size %d bytes\n",
- offset);
+ offset);
ret = 0;
@@ -1430,14 +1436,15 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
/* Mask spurios interrupts */
if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS_MASK,
- HOST_INTR_MASK)) {
+ HOST_INTR_MASK)) {
dev_warn(adapter->dev, "Write register failed\n");
return -1;
}
dev_dbg(adapter->dev, "Setting driver ready signature\n");
if (mwifiex_write_reg(adapter, REG_DRV_READY, FIRMWARE_READY_PCIE)) {
- dev_err(adapter->dev, "Failed to write driver ready signature\n");
+ dev_err(adapter->dev,
+ "Failed to write driver ready signature\n");
return -1;
}
@@ -1468,8 +1475,9 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
adapter->winner = 1;
ret = -1;
} else {
- dev_err(adapter->dev, "PCI-E is not the winner <%#x, %d>, exit download\n",
- ret, adapter->winner);
+ dev_err(adapter->dev,
+ "PCI-E is not the winner <%#x,%d>, exit dnld\n",
+ ret, adapter->winner);
ret = 0;
}
}
@@ -1512,10 +1520,11 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
(adapter->ps_state == PS_STATE_SLEEP)) {
mwifiex_pcie_enable_host_int(adapter);
if (mwifiex_write_reg(adapter,
- PCIE_CPU_INT_EVENT,
- CPU_INTR_SLEEP_CFM_DONE)) {
- dev_warn(adapter->dev, "Write register"
- " failed\n");
+ PCIE_CPU_INT_EVENT,
+ CPU_INTR_SLEEP_CFM_DONE)
+ ) {
+ dev_warn(adapter->dev,
+ "Write register failed\n");
return;
}
@@ -1551,7 +1560,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
card = (struct pcie_service_card *) pci_get_drvdata(pdev);
if (!card || !card->adapter) {
pr_debug("info: %s: card=%p adapter=%p\n", __func__, card,
- card ? card->adapter : NULL);
+ card ? card->adapter : NULL);
goto exit;
}
adapter = card->adapter;
@@ -1594,7 +1603,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
if (adapter->int_status & HOST_INTR_DNLD_DONE) {
adapter->int_status &= ~HOST_INTR_DNLD_DONE;
if (adapter->data_sent) {
- dev_dbg(adapter->dev, "info: DATA sent Interrupt\n");
+ dev_dbg(adapter->dev, "info: DATA sent intr\n");
adapter->data_sent = false;
}
}
@@ -1616,7 +1625,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
if (adapter->int_status & HOST_INTR_CMD_DONE) {
adapter->int_status &= ~HOST_INTR_CMD_DONE;
if (adapter->cmd_sent) {
- dev_dbg(adapter->dev, "info: CMD sent Interrupt\n");
+ dev_dbg(adapter->dev,
+ "info: CMD sent Interrupt\n");
adapter->cmd_sent = false;
}
/* Handle command response */
@@ -1628,15 +1638,17 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
if (mwifiex_pcie_ok_to_access_hw(adapter)) {
if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS,
&pcie_ireg)) {
- dev_warn(adapter->dev, "Read register failed\n");
+ dev_warn(adapter->dev,
+ "Read register failed\n");
return -1;
}
if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) {
if (mwifiex_write_reg(adapter,
- PCIE_HOST_INT_STATUS, ~pcie_ireg)) {
- dev_warn(adapter->dev, "Write register"
- " failed\n");
+ PCIE_HOST_INT_STATUS,
+ ~pcie_ireg)) {
+ dev_warn(adapter->dev,
+ "Write register failed\n");
return -1;
}
adapter->int_status |= pcie_ireg;
@@ -1646,7 +1658,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
}
}
dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
- adapter->cmd_sent, adapter->data_sent);
+ adapter->cmd_sent, adapter->data_sent);
mwifiex_pcie_enable_host_int(adapter);
return 0;
@@ -1737,8 +1749,9 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
goto err_iomap2;
}
- dev_dbg(adapter->dev, "PCI memory map Virt0: %p PCI memory map Virt2: "
- "%p\n", card->pci_mmap, card->pci_mmap1);
+ dev_dbg(adapter->dev,
+ "PCI memory map Virt0: %p PCI memory map Virt2: %p\n",
+ card->pci_mmap, card->pci_mmap1);
card->cmdrsp_buf = NULL;
ret = mwifiex_pcie_create_txbd_ring(adapter);
@@ -1808,7 +1821,8 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
dev_dbg(adapter->dev, "Clearing driver ready signature\n");
if (user_rmmod) {
if (mwifiex_write_reg(adapter, REG_DRV_READY, 0x00000000))
- dev_err(adapter->dev, "Failed to write driver not-ready signature\n");
+ dev_err(adapter->dev,
+ "Failed to write driver not-ready signature\n");
}
if (pdev) {
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 6396d3318ead..aff9cd763f2b 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -125,7 +125,7 @@ mwifiex_is_rsn_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
ieee_hdr.element_id == WLAN_EID_RSN))) {
iebody = (struct ie_body *)
(((u8 *) bss_desc->bcn_rsn_ie->data) +
- RSN_GTK_OUI_OFFSET);
+ RSN_GTK_OUI_OFFSET);
oui = &mwifiex_rsn_oui[cipher][0];
ret = mwifiex_search_oui_in_ie(iebody, oui);
if (ret)
@@ -148,8 +148,9 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
struct ie_body *iebody;
u8 ret = MWIFIEX_OUI_NOT_PRESENT;
- if (((bss_desc->bcn_wpa_ie) && ((*(bss_desc->bcn_wpa_ie)).
- vend_hdr.element_id == WLAN_EID_WPA))) {
+ if (((bss_desc->bcn_wpa_ie) &&
+ ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id ==
+ WLAN_EID_WPA))) {
iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
oui = &mwifiex_wpa_oui[cipher][0];
ret = mwifiex_search_oui_in_ie(iebody, oui);
@@ -163,8 +164,7 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
* This function compares two SSIDs and checks if they match.
*/
s32
-mwifiex_ssid_cmp(struct mwifiex_802_11_ssid *ssid1,
- struct mwifiex_802_11_ssid *ssid2)
+mwifiex_ssid_cmp(struct cfg80211_ssid *ssid1, struct cfg80211_ssid *ssid2)
{
if (!ssid1 || !ssid2 || (ssid1->ssid_len != ssid2->ssid_len))
return -1;
@@ -176,8 +176,8 @@ mwifiex_ssid_cmp(struct mwifiex_802_11_ssid *ssid1,
* compatible with it.
*/
static bool
-mwifiex_is_network_compatible_for_wapi(struct mwifiex_private *priv,
- struct mwifiex_bssdescriptor *bss_desc)
+mwifiex_is_bss_wapi(struct mwifiex_private *priv,
+ struct mwifiex_bssdescriptor *bss_desc)
{
if (priv->sec_info.wapi_enabled &&
(bss_desc->bcn_wapi_ie &&
@@ -193,19 +193,17 @@ mwifiex_is_network_compatible_for_wapi(struct mwifiex_private *priv,
* scanned network is compatible with it.
*/
static bool
-mwifiex_is_network_compatible_for_no_sec(struct mwifiex_private *priv,
- struct mwifiex_bssdescriptor *bss_desc)
+mwifiex_is_bss_no_sec(struct mwifiex_private *priv,
+ struct mwifiex_bssdescriptor *bss_desc)
{
- if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
- && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
- && ((!bss_desc->bcn_wpa_ie) ||
+ if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
+ !priv->sec_info.wpa2_enabled && ((!bss_desc->bcn_wpa_ie) ||
((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id !=
- WLAN_EID_WPA))
- && ((!bss_desc->bcn_rsn_ie) ||
+ WLAN_EID_WPA)) &&
+ ((!bss_desc->bcn_rsn_ie) ||
((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id !=
- WLAN_EID_RSN))
- && !priv->sec_info.encryption_mode
- && !bss_desc->privacy) {
+ WLAN_EID_RSN)) &&
+ !priv->sec_info.encryption_mode && !bss_desc->privacy) {
return true;
}
return false;
@@ -216,12 +214,11 @@ mwifiex_is_network_compatible_for_no_sec(struct mwifiex_private *priv,
* is compatible with it.
*/
static bool
-mwifiex_is_network_compatible_for_static_wep(struct mwifiex_private *priv,
- struct mwifiex_bssdescriptor *bss_desc)
+mwifiex_is_bss_static_wep(struct mwifiex_private *priv,
+ struct mwifiex_bssdescriptor *bss_desc)
{
- if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED
- && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
- && bss_desc->privacy) {
+ if (priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
+ !priv->sec_info.wpa2_enabled && bss_desc->privacy) {
return true;
}
return false;
@@ -232,13 +229,12 @@ mwifiex_is_network_compatible_for_static_wep(struct mwifiex_private *priv,
* compatible with it.
*/
static bool
-mwifiex_is_network_compatible_for_wpa(struct mwifiex_private *priv,
- struct mwifiex_bssdescriptor *bss_desc)
+mwifiex_is_bss_wpa(struct mwifiex_private *priv,
+ struct mwifiex_bssdescriptor *bss_desc)
{
- if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
- && priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
- && ((bss_desc->bcn_wpa_ie) && ((*(bss_desc->bcn_wpa_ie)).vend_hdr.
- element_id == WLAN_EID_WPA))
+ if (!priv->sec_info.wep_enabled && priv->sec_info.wpa_enabled &&
+ !priv->sec_info.wpa2_enabled && ((bss_desc->bcn_wpa_ie) &&
+ ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id == WLAN_EID_WPA))
/*
* Privacy bit may NOT be set in some APs like
* LinkSys WRT54G && bss_desc->privacy
@@ -253,8 +249,7 @@ mwifiex_is_network_compatible_for_wpa(struct mwifiex_private *priv,
(bss_desc->bcn_rsn_ie) ?
(*(bss_desc->bcn_rsn_ie)).
ieee_hdr.element_id : 0,
- (priv->sec_info.wep_status ==
- MWIFIEX_802_11_WEP_ENABLED) ? "e" : "d",
+ (priv->sec_info.wep_enabled) ? "e" : "d",
(priv->sec_info.wpa_enabled) ? "e" : "d",
(priv->sec_info.wpa2_enabled) ? "e" : "d",
priv->sec_info.encryption_mode,
@@ -269,18 +264,18 @@ mwifiex_is_network_compatible_for_wpa(struct mwifiex_private *priv,
* compatible with it.
*/
static bool
-mwifiex_is_network_compatible_for_wpa2(struct mwifiex_private *priv,
- struct mwifiex_bssdescriptor *bss_desc)
+mwifiex_is_bss_wpa2(struct mwifiex_private *priv,
+ struct mwifiex_bssdescriptor *bss_desc)
{
- if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
- && !priv->sec_info.wpa_enabled && priv->sec_info.wpa2_enabled
- && ((bss_desc->bcn_rsn_ie) && ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.
- element_id == WLAN_EID_RSN))
- /*
- * Privacy bit may NOT be set in some APs like
- * LinkSys WRT54G && bss_desc->privacy
- */
- ) {
+ if (!priv->sec_info.wep_enabled &&
+ !priv->sec_info.wpa_enabled &&
+ priv->sec_info.wpa2_enabled &&
+ ((bss_desc->bcn_rsn_ie) &&
+ ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id == WLAN_EID_RSN))) {
+ /*
+ * Privacy bit may NOT be set in some APs like
+ * LinkSys WRT54G && bss_desc->privacy
+ */
dev_dbg(priv->adapter->dev, "info: %s: WPA2: "
" wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
"EncMode=%#x privacy=%#x\n", __func__,
@@ -290,8 +285,7 @@ mwifiex_is_network_compatible_for_wpa2(struct mwifiex_private *priv,
(bss_desc->bcn_rsn_ie) ?
(*(bss_desc->bcn_rsn_ie)).
ieee_hdr.element_id : 0,
- (priv->sec_info.wep_status ==
- MWIFIEX_802_11_WEP_ENABLED) ? "e" : "d",
+ (priv->sec_info.wep_enabled) ? "e" : "d",
(priv->sec_info.wpa_enabled) ? "e" : "d",
(priv->sec_info.wpa2_enabled) ? "e" : "d",
priv->sec_info.encryption_mode,
@@ -306,17 +300,16 @@ mwifiex_is_network_compatible_for_wpa2(struct mwifiex_private *priv,
* compatible with it.
*/
static bool
-mwifiex_is_network_compatible_for_adhoc_aes(struct mwifiex_private *priv,
- struct mwifiex_bssdescriptor *bss_desc)
+mwifiex_is_bss_adhoc_aes(struct mwifiex_private *priv,
+ struct mwifiex_bssdescriptor *bss_desc)
{
- if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
- && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
- && ((!bss_desc->bcn_wpa_ie) || ((*(bss_desc->bcn_wpa_ie)).vend_hdr.
- element_id != WLAN_EID_WPA))
- && ((!bss_desc->bcn_rsn_ie) || ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.
- element_id != WLAN_EID_RSN))
- && !priv->sec_info.encryption_mode
- && bss_desc->privacy) {
+ if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
+ !priv->sec_info.wpa2_enabled &&
+ ((!bss_desc->bcn_wpa_ie) ||
+ ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id != WLAN_EID_WPA)) &&
+ ((!bss_desc->bcn_rsn_ie) ||
+ ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
+ !priv->sec_info.encryption_mode && bss_desc->privacy) {
return true;
}
return false;
@@ -327,17 +320,16 @@ mwifiex_is_network_compatible_for_adhoc_aes(struct mwifiex_private *priv,
* is compatible with it.
*/
static bool
-mwifiex_is_network_compatible_for_dynamic_wep(struct mwifiex_private *priv,
- struct mwifiex_bssdescriptor *bss_desc)
+mwifiex_is_bss_dynamic_wep(struct mwifiex_private *priv,
+ struct mwifiex_bssdescriptor *bss_desc)
{
- if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
- && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
- && ((!bss_desc->bcn_wpa_ie) || ((*(bss_desc->bcn_wpa_ie)).vend_hdr.
- element_id != WLAN_EID_WPA))
- && ((!bss_desc->bcn_rsn_ie) || ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.
- element_id != WLAN_EID_RSN))
- && priv->sec_info.encryption_mode
- && bss_desc->privacy) {
+ if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
+ !priv->sec_info.wpa2_enabled &&
+ ((!bss_desc->bcn_wpa_ie) ||
+ ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id != WLAN_EID_WPA)) &&
+ ((!bss_desc->bcn_rsn_ie) ||
+ ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
+ priv->sec_info.encryption_mode && bss_desc->privacy) {
dev_dbg(priv->adapter->dev, "info: %s: dynamic "
"WEP: wpa_ie=%#x wpa2_ie=%#x "
"EncMode=%#x privacy=%#x\n",
@@ -382,8 +374,9 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
bss_desc->disable_11n = false;
/* Don't check for compatibility if roaming */
- if (priv->media_connected && (priv->bss_mode == NL80211_IFTYPE_STATION)
- && (bss_desc->bss_mode == NL80211_IFTYPE_STATION))
+ if (priv->media_connected &&
+ (priv->bss_mode == NL80211_IFTYPE_STATION) &&
+ (bss_desc->bss_mode == NL80211_IFTYPE_STATION))
return 0;
if (priv->wps.session_enable) {
@@ -392,32 +385,30 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
return 0;
}
- if (mwifiex_is_network_compatible_for_wapi(priv, bss_desc)) {
+ if (mwifiex_is_bss_wapi(priv, bss_desc)) {
dev_dbg(adapter->dev, "info: return success for WAPI AP\n");
return 0;
}
if (bss_desc->bss_mode == mode) {
- if (mwifiex_is_network_compatible_for_no_sec(priv, bss_desc)) {
+ if (mwifiex_is_bss_no_sec(priv, bss_desc)) {
/* No security */
return 0;
- } else if (mwifiex_is_network_compatible_for_static_wep(priv,
- bss_desc)) {
+ } else if (mwifiex_is_bss_static_wep(priv, bss_desc)) {
/* Static WEP enabled */
dev_dbg(adapter->dev, "info: Disable 11n in WEP mode.\n");
bss_desc->disable_11n = true;
return 0;
- } else if (mwifiex_is_network_compatible_for_wpa(priv,
- bss_desc)) {
+ } else if (mwifiex_is_bss_wpa(priv, bss_desc)) {
/* WPA enabled */
- if (((priv->adapter->config_bands & BAND_GN
- || priv->adapter->config_bands & BAND_AN)
- && bss_desc->bcn_ht_cap)
- && !mwifiex_is_wpa_oui_present(bss_desc,
- CIPHER_SUITE_CCMP)) {
-
- if (mwifiex_is_wpa_oui_present(bss_desc,
- CIPHER_SUITE_TKIP)) {
+ if (((priv->adapter->config_bands & BAND_GN ||
+ priv->adapter->config_bands & BAND_AN) &&
+ bss_desc->bcn_ht_cap) &&
+ !mwifiex_is_wpa_oui_present(bss_desc,
+ CIPHER_SUITE_CCMP)) {
+
+ if (mwifiex_is_wpa_oui_present
+ (bss_desc, CIPHER_SUITE_TKIP)) {
dev_dbg(adapter->dev,
"info: Disable 11n if AES "
"is not supported by AP\n");
@@ -427,17 +418,16 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
}
}
return 0;
- } else if (mwifiex_is_network_compatible_for_wpa2(priv,
- bss_desc)) {
+ } else if (mwifiex_is_bss_wpa2(priv, bss_desc)) {
/* WPA2 enabled */
- if (((priv->adapter->config_bands & BAND_GN
- || priv->adapter->config_bands & BAND_AN)
- && bss_desc->bcn_ht_cap)
- && !mwifiex_is_rsn_oui_present(bss_desc,
- CIPHER_SUITE_CCMP)) {
-
- if (mwifiex_is_rsn_oui_present(bss_desc,
- CIPHER_SUITE_TKIP)) {
+ if (((priv->adapter->config_bands & BAND_GN ||
+ priv->adapter->config_bands & BAND_AN) &&
+ bss_desc->bcn_ht_cap) &&
+ !mwifiex_is_rsn_oui_present(bss_desc,
+ CIPHER_SUITE_CCMP)) {
+
+ if (mwifiex_is_rsn_oui_present
+ (bss_desc, CIPHER_SUITE_TKIP)) {
dev_dbg(adapter->dev,
"info: Disable 11n if AES "
"is not supported by AP\n");
@@ -447,32 +437,26 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
}
}
return 0;
- } else if (mwifiex_is_network_compatible_for_adhoc_aes(priv,
- bss_desc)) {
+ } else if (mwifiex_is_bss_adhoc_aes(priv, bss_desc)) {
/* Ad-hoc AES enabled */
return 0;
- } else if (mwifiex_is_network_compatible_for_dynamic_wep(priv,
- bss_desc)) {
+ } else if (mwifiex_is_bss_dynamic_wep(priv, bss_desc)) {
/* Dynamic WEP enabled */
return 0;
}
/* Security doesn't match */
- dev_dbg(adapter->dev, "info: %s: failed: "
- "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s EncMode"
- "=%#x privacy=%#x\n",
- __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*(bss_desc->bcn_wpa_ie)).vend_hdr.
- element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*(bss_desc->bcn_rsn_ie)).ieee_hdr.
- element_id : 0,
- (priv->sec_info.wep_status ==
- MWIFIEX_802_11_WEP_ENABLED) ? "e" : "d",
- (priv->sec_info.wpa_enabled) ? "e" : "d",
- (priv->sec_info.wpa2_enabled) ? "e" : "d",
- priv->sec_info.encryption_mode, bss_desc->privacy);
+ dev_dbg(adapter->dev,
+ "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s "
+ "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n", __func__,
+ (bss_desc->bcn_wpa_ie) ?
+ (*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id : 0,
+ (bss_desc->bcn_rsn_ie) ?
+ (*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id : 0,
+ (priv->sec_info.wep_enabled) ? "e" : "d",
+ (priv->sec_info.wpa_enabled) ? "e" : "d",
+ (priv->sec_info.wpa2_enabled) ? "e" : "d",
+ priv->sec_info.encryption_mode, bss_desc->privacy);
return -1;
}
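Illustration (not from the patch): the helpers renamed in the hunks above -- mwifiex_is_bss_no_sec(), _static_wep(), _wpa(), _wpa2(), _adhoc_aes() and _dynamic_wep() -- keep their original logic; only the names shrink, the conditions get re-wrapped, and the wep_status comparisons become checks of the new wep_enabled flag. A simplified userspace mirror of the "no security" case is sketched below; the struct and field names are stand-ins, not the driver's real types.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for priv->sec_info and the BSS descriptor. */
struct sec_info { bool wep_enabled, wpa_enabled, wpa2_enabled; int encryption_mode; };
struct bss_desc { bool has_wpa_ie, has_rsn_ie, privacy; };

/* Mirrors mwifiex_is_bss_no_sec(): the BSS is treated as open only when every
 * station-side security knob is off, the beacon carries neither a WPA vendor
 * IE nor an RSN IE, and the privacy bit is clear. */
static bool bss_is_open(const struct sec_info *sec, const struct bss_desc *bss)
{
	return !sec->wep_enabled && !sec->wpa_enabled && !sec->wpa2_enabled &&
	       !sec->encryption_mode && !bss->has_wpa_ie && !bss->has_rsn_ie &&
	       !bss->privacy;
}

int main(void)
{
	struct sec_info sec = { false, false, false, 0 };
	struct bss_desc open_ap = { false, false, false };
	struct bss_desc wpa2_ap = { false, true, true };

	printf("open AP -> %s\n", bss_is_open(&sec, &open_ap) ? "compatible" : "skip");
	printf("WPA2 AP -> %s\n", bss_is_open(&sec, &wpa2_ap) ? "compatible" : "skip");
	return 0;
}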
@@ -489,11 +473,11 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
*/
static void
mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
- const struct mwifiex_user_scan_cfg
- *user_scan_in,
- struct mwifiex_chan_scan_param_set
- *scan_chan_list,
- u8 filtered_scan)
+ const struct mwifiex_user_scan_cfg
+ *user_scan_in,
+ struct mwifiex_chan_scan_param_set
+ *scan_chan_list,
+ u8 filtered_scan)
{
enum ieee80211_band band;
struct ieee80211_supported_band *sband;
@@ -515,7 +499,7 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
scan_chan_list[chan_idx].radio_type = band;
if (user_scan_in &&
- user_scan_in->chan_list[0].scan_time)
+ user_scan_in->chan_list[0].scan_time)
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16((u16) user_scan_in->
chan_list[0].scan_time);
@@ -604,19 +588,19 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
* - done_early is set (controlling individual scanning of
* 1,6,11)
*/
- while (tlv_idx < max_chan_per_scan
- && tmp_chan_list->chan_number && !done_early) {
+ while (tlv_idx < max_chan_per_scan &&
+ tmp_chan_list->chan_number && !done_early) {
dev_dbg(priv->adapter->dev,
"info: Scan: Chan(%3d), Radio(%d),"
" Mode(%d, %d), Dur(%d)\n",
- tmp_chan_list->chan_number,
- tmp_chan_list->radio_type,
- tmp_chan_list->chan_scan_mode_bitmap
- & MWIFIEX_PASSIVE_SCAN,
- (tmp_chan_list->chan_scan_mode_bitmap
- & MWIFIEX_DISABLE_CHAN_FILT) >> 1,
- le16_to_cpu(tmp_chan_list->max_scan_time));
+ tmp_chan_list->chan_number,
+ tmp_chan_list->radio_type,
+ tmp_chan_list->chan_scan_mode_bitmap
+ & MWIFIEX_PASSIVE_SCAN,
+ (tmp_chan_list->chan_scan_mode_bitmap
+ & MWIFIEX_DISABLE_CHAN_FILT) >> 1,
+ le16_to_cpu(tmp_chan_list->max_scan_time));
/* Copy the current channel TLV to the command being
prepared */
@@ -658,9 +642,10 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
/* Stop the loop if the *current* channel is in the
1,6,11 set and we are not filtering on a BSSID
or SSID. */
- if (!filtered_scan && (tmp_chan_list->chan_number == 1
- || tmp_chan_list->chan_number == 6
- || tmp_chan_list->chan_number == 11))
+ if (!filtered_scan &&
+ (tmp_chan_list->chan_number == 1 ||
+ tmp_chan_list->chan_number == 6 ||
+ tmp_chan_list->chan_number == 11))
done_early = true;
/* Increment the tmp pointer to the next channel to
@@ -670,9 +655,10 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
/* Stop the loop if the *next* channel is in the 1,6,11
set. This will cause it to be the only channel
scanned on the next iteration */
- if (!filtered_scan && (tmp_chan_list->chan_number == 1
- || tmp_chan_list->chan_number == 6
- || tmp_chan_list->chan_number == 11))
+ if (!filtered_scan &&
+ (tmp_chan_list->chan_number == 1 ||
+ tmp_chan_list->chan_number == 6 ||
+ tmp_chan_list->chan_number == 11))
done_early = true;
}
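Illustration (not from the patch): the two hunks above only re-wrap the condition behind done_early in mwifiex_scan_channel_list(); the rule itself is unchanged. Sketched as a standalone predicate (scan_done_early is an illustrative name, not a driver symbol):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the done_early checks: when no SSID/BSSID filter is in effect, the
 * per-command channel loop stops at channel 1, 6 or 11 so that each of those
 * busy 2.4 GHz channels ends up in a scan command of its own. */
static bool scan_done_early(bool filtered_scan, int chan_number)
{
	return !filtered_scan &&
	       (chan_number == 1 || chan_number == 6 || chan_number == 11);
}

int main(void)
{
	printf("unfiltered, chan 6 -> stop=%d\n", scan_done_early(false, 6));
	printf("unfiltered, chan 3 -> stop=%d\n", scan_done_early(false, 3));
	printf("filtered,   chan 6 -> stop=%d\n", scan_done_early(true, 6));
	return 0;
}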
@@ -724,15 +710,13 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
* If the number of probes is not set, adapter default setting is used.
*/
static void
-mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
- const struct mwifiex_user_scan_cfg *user_scan_in,
- struct mwifiex_scan_cmd_config *scan_cfg_out,
- struct mwifiex_ie_types_chan_list_param_set
- **chan_list_out,
- struct mwifiex_chan_scan_param_set
- *scan_chan_list,
- u8 *max_chan_per_scan, u8 *filtered_scan,
- u8 *scan_current_only)
+mwifiex_config_scan(struct mwifiex_private *priv,
+ const struct mwifiex_user_scan_cfg *user_scan_in,
+ struct mwifiex_scan_cmd_config *scan_cfg_out,
+ struct mwifiex_ie_types_chan_list_param_set **chan_list_out,
+ struct mwifiex_chan_scan_param_set *scan_chan_list,
+ u8 *max_chan_per_scan, u8 *filtered_scan,
+ u8 *scan_current_only)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_ie_types_num_probes *num_probes_tlv;
@@ -747,7 +731,7 @@ mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
u16 scan_dur;
u8 channel;
u8 radio_type;
- u32 ssid_idx;
+ int i;
u8 ssid_filter;
u8 rates[MWIFIEX_SUPPORTED_RATES];
u32 rates_size;
@@ -802,14 +786,8 @@ mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
user_scan_in->specific_bssid,
sizeof(scan_cfg_out->specific_bssid));
- for (ssid_idx = 0;
- ((ssid_idx < ARRAY_SIZE(user_scan_in->ssid_list))
- && (*user_scan_in->ssid_list[ssid_idx].ssid
- || user_scan_in->ssid_list[ssid_idx].max_len));
- ssid_idx++) {
-
- ssid_len = strlen(user_scan_in->ssid_list[ssid_idx].
- ssid) + 1;
+ for (i = 0; i < user_scan_in->num_ssids; i++) {
+ ssid_len = user_scan_in->ssid_list[i].ssid_len;
wildcard_ssid_tlv =
(struct mwifiex_ie_types_wildcard_ssid_params *)
@@ -820,19 +798,26 @@ mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
(u16) (ssid_len + sizeof(wildcard_ssid_tlv->
max_ssid_length)));
- /* max_ssid_length = 0 tells firmware to perform
- specific scan for the SSID filled */
- wildcard_ssid_tlv->max_ssid_length = 0;
+ /*
+ * max_ssid_length = 0 tells firmware to perform
+ * specific scan for the SSID filled, whereas
+ * max_ssid_length = IEEE80211_MAX_SSID_LEN is for
+ * wildcard scan.
+ */
+ if (ssid_len)
+ wildcard_ssid_tlv->max_ssid_length = 0;
+ else
+ wildcard_ssid_tlv->max_ssid_length =
+ IEEE80211_MAX_SSID_LEN;
memcpy(wildcard_ssid_tlv->ssid,
- user_scan_in->ssid_list[ssid_idx].ssid,
- ssid_len);
+ user_scan_in->ssid_list[i].ssid, ssid_len);
tlv_pos += (sizeof(wildcard_ssid_tlv->header)
+ le16_to_cpu(wildcard_ssid_tlv->header.len));
- dev_dbg(adapter->dev, "info: scan: ssid_list[%d]: %s, %d\n",
- ssid_idx, wildcard_ssid_tlv->ssid,
+ dev_dbg(adapter->dev, "info: scan: ssid[%d]: %s, %d\n",
+ i, wildcard_ssid_tlv->ssid,
wildcard_ssid_tlv->max_ssid_length);
/* Empty wildcard ssid with a maxlen will match many or
@@ -841,7 +826,6 @@ mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
filtered. */
if (!ssid_len && wildcard_ssid_tlv->max_ssid_length)
ssid_filter = false;
-
}
/*
@@ -850,9 +834,9 @@ mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
* truncate scan results. That is not an issue with an SSID
* or BSSID filter applied to the scan results in the firmware.
*/
- if ((ssid_idx && ssid_filter)
- || memcmp(scan_cfg_out->specific_bssid, &zero_mac,
- sizeof(zero_mac)))
+ if ((i && ssid_filter) ||
+ memcmp(scan_cfg_out->specific_bssid, &zero_mac,
+ sizeof(zero_mac)))
*filtered_scan = true;
} else {
scan_cfg_out->bss_mode = (u8) adapter->scan_mode;
@@ -873,7 +857,7 @@ mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
if (num_probes) {
dev_dbg(adapter->dev, "info: scan: num_probes = %d\n",
- num_probes);
+ num_probes);
num_probes_tlv = (struct mwifiex_ie_types_num_probes *) tlv_pos;
num_probes_tlv->header.type = cpu_to_le16(TLV_TYPE_NUMPROBES);
@@ -899,9 +883,9 @@ mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
dev_dbg(adapter->dev, "info: SCAN_CMD: Rates size = %d\n", rates_size);
- if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info)
- && (priv->adapter->config_bands & BAND_GN
- || priv->adapter->config_bands & BAND_AN)) {
+ if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info) &&
+ (priv->adapter->config_bands & BAND_GN ||
+ priv->adapter->config_bands & BAND_AN)) {
ht_cap = (struct mwifiex_ie_types_htcap *) tlv_pos;
memset(ht_cap, 0, sizeof(struct mwifiex_ie_types_htcap));
ht_cap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
@@ -930,8 +914,8 @@ mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
dev_dbg(adapter->dev, "info: Scan: Using supplied channel list\n");
for (chan_idx = 0;
- chan_idx < MWIFIEX_USER_SCAN_CHAN_MAX
- && user_scan_in->chan_list[chan_idx].chan_number;
+ chan_idx < MWIFIEX_USER_SCAN_CHAN_MAX &&
+ user_scan_in->chan_list[chan_idx].chan_number;
chan_idx++) {
channel = user_scan_in->chan_list[chan_idx].chan_number;
@@ -971,9 +955,9 @@ mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
}
/* Check if we are only scanning the current channel */
- if ((chan_idx == 1)
- && (user_scan_in->chan_list[0].chan_number
- == priv->curr_bss_params.bss_descriptor.channel)) {
+ if ((chan_idx == 1) &&
+ (user_scan_in->chan_list[0].chan_number ==
+ priv->curr_bss_params.bss_descriptor.channel)) {
*scan_current_only = true;
dev_dbg(adapter->dev,
"info: Scan: Scanning current channel only\n");
@@ -981,7 +965,7 @@ mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
} else {
dev_dbg(adapter->dev,
- "info: Scan: Creating full region channel list\n");
+ "info: Scan: Creating full region channel list\n");
mwifiex_scan_create_channel_list(priv, user_scan_in,
scan_chan_list,
*filtered_scan);
@@ -1013,7 +997,7 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
*tlv_data = NULL;
dev_dbg(adapter->dev, "info: SCAN_RESP: tlv_buf_size = %d\n",
- tlv_buf_size);
+ tlv_buf_size);
while (tlv_buf_left >= sizeof(struct mwifiex_ie_types_header)) {
@@ -1110,8 +1094,9 @@ mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
bss_entry->ssid.ssid_len = element_len;
memcpy(bss_entry->ssid.ssid, (current_ptr + 2),
element_len);
- dev_dbg(adapter->dev, "info: InterpretIE: ssid: "
- "%-32s\n", bss_entry->ssid.ssid);
+ dev_dbg(adapter->dev,
+ "info: InterpretIE: ssid: %-32s\n",
+ bss_entry->ssid.ssid);
break;
case WLAN_EID_SUPP_RATES:
@@ -1199,13 +1184,13 @@ mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
bss_entry->bcn_wpa_ie =
(struct ieee_types_vendor_specific *)
current_ptr;
- bss_entry->wpa_offset = (u16) (current_ptr -
- bss_entry->beacon_buf);
+ bss_entry->wpa_offset = (u16)
+ (current_ptr - bss_entry->beacon_buf);
} else if (!memcmp(vendor_ie->vend_hdr.oui, wmm_oui,
sizeof(wmm_oui))) {
if (total_ie_len ==
- sizeof(struct ieee_types_wmm_parameter)
- || total_ie_len ==
+ sizeof(struct ieee_types_wmm_parameter) ||
+ total_ie_len ==
sizeof(struct ieee_types_wmm_info))
/*
* Only accept and copy the WMM IE if
@@ -1326,14 +1311,14 @@ static int mwifiex_scan_networks(struct mwifiex_private *priv,
}
scan_cfg_out = kzalloc(sizeof(union mwifiex_scan_cmd_config_tlv),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!scan_cfg_out) {
dev_err(adapter->dev, "failed to alloc scan_cfg_out\n");
return -ENOMEM;
}
buf_size = sizeof(struct mwifiex_chan_scan_param_set) *
- MWIFIEX_USER_SCAN_CHAN_MAX;
+ MWIFIEX_USER_SCAN_CHAN_MAX;
scan_chan_list = kzalloc(buf_size, GFP_KERNEL);
if (!scan_chan_list) {
dev_err(adapter->dev, "failed to alloc scan_chan_list\n");
@@ -1341,10 +1326,9 @@ static int mwifiex_scan_networks(struct mwifiex_private *priv,
return -ENOMEM;
}
- mwifiex_scan_setup_scan_config(priv, user_scan_in,
- &scan_cfg_out->config, &chan_list_out,
- scan_chan_list, &max_chan_per_scan,
- &filtered_scan, &scan_current_chan_only);
+ mwifiex_config_scan(priv, user_scan_in, &scan_cfg_out->config,
+ &chan_list_out, scan_chan_list, &max_chan_per_scan,
+ &filtered_scan, &scan_current_chan_only);
ret = mwifiex_scan_channel_list(priv, max_chan_per_scan, filtered_scan,
&scan_cfg_out->config, chan_list_out,
@@ -1355,10 +1339,10 @@ static int mwifiex_scan_networks(struct mwifiex_private *priv,
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
if (!list_empty(&adapter->scan_pending_q)) {
cmd_node = list_first_entry(&adapter->scan_pending_q,
- struct cmd_ctrl_node, list);
+ struct cmd_ctrl_node, list);
list_del(&cmd_node->list);
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
+ flags);
adapter->cmd_queued = cmd_node;
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
true);
@@ -1444,8 +1428,8 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
if (!bss_desc)
return -1;
- if ((mwifiex_get_cfp_by_band_and_channel_from_cfg80211(priv,
- (u8) bss_desc->bss_band, (u16) bss_desc->channel))) {
+ if ((mwifiex_get_cfp(priv, (u8) bss_desc->bss_band,
+ (u16) bss_desc->channel, 0))) {
switch (priv->bss_mode) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
@@ -1524,7 +1508,7 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid,
/* Make a copy of current BSSID descriptor */
memcpy(&priv->curr_bss_params.bss_descriptor, bss_desc,
- sizeof(priv->curr_bss_params.bss_descriptor));
+ sizeof(priv->curr_bss_params.bss_descriptor));
mwifiex_save_curr_bcn(priv);
spin_unlock_irqrestore(&priv->curr_bcn_buf_lock, flags);
@@ -1575,7 +1559,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
struct cfg80211_bss *bss;
is_bgscan_resp = (le16_to_cpu(resp->command)
- == HostCmd_CMD_802_11_BG_SCAN_QUERY);
+ == HostCmd_CMD_802_11_BG_SCAN_QUERY);
if (is_bgscan_resp)
scan_rsp = &resp->params.bg_scan_query_resp.scan_resp;
else
@@ -1584,20 +1568,20 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
if (scan_rsp->number_of_sets > MWIFIEX_MAX_AP) {
dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
- scan_rsp->number_of_sets);
+ scan_rsp->number_of_sets);
ret = -1;
goto done;
}
bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n",
- bytes_left);
+ bytes_left);
scan_resp_size = le16_to_cpu(resp->size);
dev_dbg(adapter->dev,
"info: SCAN_RESP: returned %d APs before parsing\n",
- scan_rsp->number_of_sets);
+ scan_rsp->number_of_sets);
bss_info = scan_rsp->bss_desc_and_tlv_buffer;
@@ -1635,7 +1619,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
s32 rssi;
const u8 *ie_buf;
size_t ie_len;
- int channel = -1;
+ u16 channel = 0;
u64 network_tsf = 0;
u16 beacon_size = 0;
u32 curr_bcn_bytes;
@@ -1673,7 +1657,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
* and capability information
*/
if (curr_bcn_bytes < sizeof(struct mwifiex_bcn_param)) {
- dev_err(adapter->dev, "InterpretIE: not enough bytes left\n");
+ dev_err(adapter->dev,
+ "InterpretIE: not enough bytes left\n");
continue;
}
bcn_param = (struct mwifiex_bcn_param *)current_ptr;
@@ -1683,20 +1668,20 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
memcpy(bssid, bcn_param->bssid, ETH_ALEN);
rssi = (s32) (bcn_param->rssi);
- dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%02X\n",
- rssi);
+ dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%02X\n", rssi);
beacon_period = le16_to_cpu(bcn_param->beacon_period);
cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
- cap_info_bitmap);
+ cap_info_bitmap);
/* Rest of the current buffer are IE's */
ie_buf = current_ptr;
ie_len = curr_bcn_bytes;
- dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP"
- " = %d\n", curr_bcn_bytes);
+ dev_dbg(adapter->dev,
+ "info: InterpretIE: IELength for this AP = %d\n",
+ curr_bcn_bytes);
while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
u8 element_id, element_len;
@@ -1705,8 +1690,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
element_len = *(current_ptr + 1);
if (curr_bcn_bytes < element_len +
sizeof(struct ieee_types_header)) {
- dev_err(priv->adapter->dev, "%s: in processing"
- " IE, bytes left < IE length\n",
+ dev_err(priv->adapter->dev,
+ "%s: bytes left < IE length\n",
__func__);
goto done;
}
@@ -1730,10 +1715,10 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
*/
if (tsf_tlv)
memcpy(&network_tsf,
- &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
- sizeof(network_tsf));
+ &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
+ sizeof(network_tsf));
- if (channel != -1) {
+ if (channel) {
struct ieee80211_channel *chan;
u8 band;
@@ -1746,8 +1731,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
& (BIT(0) | BIT(1)));
}
- cfp = mwifiex_get_cfp_by_band_and_channel_from_cfg80211(
- priv, (u8)band, (u16)channel);
+ cfp = mwifiex_get_cfp(priv, band, channel, 0);
freq = cfp ? cfp->freq : 0;
@@ -1761,13 +1745,15 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
*(u8 *)bss->priv = band;
cfg80211_put_bss(bss);
- if (priv->media_connected && !memcmp(bssid,
- priv->curr_bss_params.bss_descriptor
- .mac_address, ETH_ALEN))
- mwifiex_update_curr_bss_params(priv,
- bssid, rssi, ie_buf,
- ie_len, beacon_period,
- cap_info_bitmap, band);
+ if (priv->media_connected &&
+ !memcmp(bssid,
+ priv->curr_bss_params.bss_descriptor
+ .mac_address, ETH_ALEN))
+ mwifiex_update_curr_bss_params
+ (priv, bssid, rssi,
+ ie_buf, ie_len,
+ beacon_period,
+ cap_info_bitmap, band);
}
} else {
dev_dbg(adapter->dev, "missing BSS channel IE\n");
@@ -1794,8 +1780,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
}
if (priv->user_scan_cfg) {
- dev_dbg(priv->adapter->dev, "info: %s: sending scan "
- "results\n", __func__);
+ dev_dbg(priv->adapter->dev,
+ "info: %s: sending scan results\n", __func__);
cfg80211_scan_done(priv->scan_request, 0);
priv->scan_request = NULL;
kfree(priv->user_scan_cfg);
@@ -1860,7 +1846,7 @@ mwifiex_queue_scan_cmd(struct mwifiex_private *priv,
* firmware, filtered on a specific SSID.
*/
static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
- struct mwifiex_802_11_ssid *req_ssid)
+ struct cfg80211_ssid *req_ssid)
{
struct mwifiex_adapter *adapter = priv->adapter;
int ret = 0;
@@ -1886,8 +1872,8 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
return -ENOMEM;
}
- memcpy(scan_cfg->ssid_list[0].ssid, req_ssid->ssid,
- req_ssid->ssid_len);
+ scan_cfg->ssid_list = req_ssid;
+ scan_cfg->num_ssids = 1;
ret = mwifiex_scan_networks(priv, scan_cfg);
@@ -1905,13 +1891,13 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
* scan, depending upon whether an SSID is provided or not.
*/
int mwifiex_request_scan(struct mwifiex_private *priv,
- struct mwifiex_802_11_ssid *req_ssid)
+ struct cfg80211_ssid *req_ssid)
{
int ret;
if (down_interruptible(&priv->async_sem)) {
dev_err(priv->adapter->dev, "%s: acquire semaphore\n",
- __func__);
+ __func__);
return -1;
}
priv->scan_pending_on_block = true;
@@ -1996,21 +1982,21 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
/* allocate beacon buffer at 1st time; or if its size has changed */
if (!priv->curr_bcn_buf ||
- priv->curr_bcn_size != curr_bss->beacon_buf_size) {
+ priv->curr_bcn_size != curr_bss->beacon_buf_size) {
priv->curr_bcn_size = curr_bss->beacon_buf_size;
kfree(priv->curr_bcn_buf);
priv->curr_bcn_buf = kmalloc(curr_bss->beacon_buf_size,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!priv->curr_bcn_buf) {
dev_err(priv->adapter->dev,
- "failed to alloc curr_bcn_buf\n");
+ "failed to alloc curr_bcn_buf\n");
return;
}
}
memcpy(priv->curr_bcn_buf, curr_bss->beacon_buf,
- curr_bss->beacon_buf_size);
+ curr_bss->beacon_buf_size);
dev_dbg(priv->adapter->dev, "info: current beacon saved %d\n",
priv->curr_bcn_size);
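Illustration (not from the patch): beyond the whitespace cleanups, the scan.c hunks switch the SSID plumbing from the driver-private mwifiex_802_11_ssid to cfg80211_ssid, drive the SSID loop by num_ssids instead of hunting for a terminating entry, and make the wildcard TLV rule explicit: a non-empty entry requests a specific scan, an empty one becomes a wildcard. (The GFP_KERNEL -> GFP_ATOMIC change in mwifiex_save_curr_bcn() fits the curr_bcn_buf_lock held around the call visible in mwifiex_update_curr_bss_params().) The length rule, as a minimal userspace sketch with an illustrative helper name:

#include <stdio.h>
#include <string.h>

#define IEEE80211_MAX_SSID_LEN 32	/* same limit cfg80211 uses */

/* Mirrors the rule added in mwifiex_config_scan(): max_ssid_length = 0 asks
 * the firmware for a specific scan of the SSID that was filled in, while
 * max_ssid_length = IEEE80211_MAX_SSID_LEN marks the entry as a wildcard. */
static unsigned char wildcard_max_len(size_t ssid_len)
{
	return ssid_len ? 0 : IEEE80211_MAX_SSID_LEN;
}

int main(void)
{
	printf("\"marvell-ap\" -> max_ssid_length %u\n",
	       (unsigned)wildcard_max_len(strlen("marvell-ap")));
	printf("\"\" (empty)   -> max_ssid_length %u\n",
	       (unsigned)wildcard_max_len(0));
	return 0;
}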
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index d39d8457f252..f8012e2b7f7c 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -67,13 +67,11 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
struct sdio_mmc_card *card = NULL;
pr_debug("info: vendor=0x%4.04X device=0x%4.04X class=%d function=%d\n",
- func->vendor, func->device, func->class, func->num);
+ func->vendor, func->device, func->class, func->num);
card = kzalloc(sizeof(struct sdio_mmc_card), GFP_KERNEL);
- if (!card) {
- pr_err("%s: failed to alloc memory\n", __func__);
+ if (!card)
return -ENOMEM;
- }
card->func = func;
@@ -112,6 +110,7 @@ mwifiex_sdio_remove(struct sdio_func *func)
{
struct sdio_mmc_card *card;
struct mwifiex_adapter *adapter;
+ struct mwifiex_private *priv;
int i;
pr_debug("info: SDIO func num=%d\n", func->num);
@@ -131,15 +130,12 @@ mwifiex_sdio_remove(struct sdio_func *func)
for (i = 0; i < adapter->priv_num; i++)
if ((GET_BSS_ROLE(adapter->priv[i]) ==
MWIFIEX_BSS_ROLE_STA) &&
- adapter->priv[i]->media_connected)
+ adapter->priv[i]->media_connected)
mwifiex_deauthenticate(adapter->priv[i], NULL);
- mwifiex_disable_auto_ds(mwifiex_get_priv(adapter,
- MWIFIEX_BSS_ROLE_ANY));
-
- mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
- MWIFIEX_BSS_ROLE_ANY),
- MWIFIEX_FUNC_SHUTDOWN);
+ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+ mwifiex_disable_auto_ds(priv);
+ mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN);
}
mwifiex_remove_card(card->adapter, &add_remove_card_sem);
@@ -169,7 +165,7 @@ static int mwifiex_sdio_suspend(struct device *dev)
if (func) {
pm_flag = sdio_get_host_pm_caps(func);
pr_debug("cmd: %s: suspend: PM flag = 0x%x\n",
- sdio_func_id(func), pm_flag);
+ sdio_func_id(func), pm_flag);
if (!(pm_flag & MMC_PM_KEEP_POWER)) {
pr_err("%s: cannot remain alive while host is"
" suspended\n", sdio_func_id(func));
@@ -363,12 +359,11 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
{
struct sdio_mmc_card *card = adapter->card;
int ret = -1;
- u8 blk_mode =
- (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE;
+ u8 blk_mode = (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE
+ : BLOCK_MODE;
u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
- u32 blk_cnt =
- (blk_mode ==
- BLOCK_MODE) ? (len / MWIFIEX_SDIO_BLOCK_SIZE) : len;
+ u32 blk_cnt = (blk_mode == BLOCK_MODE) ? (len / MWIFIEX_SDIO_BLOCK_SIZE)
+ : len;
u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
if (claim)
@@ -472,8 +467,7 @@ static int mwifiex_write_data_to_card(struct mwifiex_adapter *adapter,
i++;
dev_err(adapter->dev, "host_to_card, write iomem"
" (%d) failed: %d\n", i, ret);
- if (mwifiex_write_reg(adapter,
- CONFIGURATION_REG, 0x04))
+ if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04))
dev_err(adapter->dev, "write CFG reg failed\n");
ret = -1;
@@ -507,11 +501,11 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
card->mp_rd_bitmap &= (u16) (~CTRL_PORT_MASK);
*port = CTRL_PORT;
dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%04x\n",
- *port, card->mp_rd_bitmap);
+ *port, card->mp_rd_bitmap);
} else {
if (card->mp_rd_bitmap & (1 << card->curr_rd_port)) {
- card->mp_rd_bitmap &=
- (u16) (~(1 << card->curr_rd_port));
+ card->mp_rd_bitmap &= (u16)
+ (~(1 << card->curr_rd_port));
*port = card->curr_rd_port;
if (++card->curr_rd_port == MAX_PORT)
@@ -522,7 +516,7 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
dev_dbg(adapter->dev,
"data: port=%d mp_rd_bitmap=0x%04x -> 0x%04x\n",
- *port, rd_bitmap, card->mp_rd_bitmap);
+ *port, rd_bitmap, card->mp_rd_bitmap);
}
return 0;
}
@@ -556,14 +550,14 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u8 *port)
if (*port == CTRL_PORT) {
dev_err(adapter->dev, "invalid data port=%d cur port=%d"
- " mp_wr_bitmap=0x%04x -> 0x%04x\n",
- *port, card->curr_wr_port, wr_bitmap,
- card->mp_wr_bitmap);
+ " mp_wr_bitmap=0x%04x -> 0x%04x\n",
+ *port, card->curr_wr_port, wr_bitmap,
+ card->mp_wr_bitmap);
return -1;
}
dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%04x -> 0x%04x\n",
- *port, wr_bitmap, card->mp_wr_bitmap);
+ *port, wr_bitmap, card->mp_wr_bitmap);
return 0;
}
@@ -583,11 +577,11 @@ mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits)
else if ((cs & bits) == bits)
return 0;
- udelay(10);
+ usleep_range(10, 20);
}
- dev_err(adapter->dev, "poll card status failed, tries = %d\n",
- tries);
+ dev_err(adapter->dev, "poll card status failed, tries = %d\n", tries);
+
return -1;
}
@@ -670,14 +664,14 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter,
if (ret) {
dev_err(adapter->dev, "%s: read iomem failed: %d\n", __func__,
- ret);
+ ret);
return -1;
}
nb = le16_to_cpu(*(__le16 *) (buffer));
if (nb > npayload) {
- dev_err(adapter->dev, "%s: invalid packet, nb=%d, npayload=%d\n",
- __func__, nb, npayload);
+ dev_err(adapter->dev, "%s: invalid packet, nb=%d npayload=%d\n",
+ __func__, nb, npayload);
return -1;
}
@@ -707,19 +701,19 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
u32 i = 0;
if (!firmware_len) {
- dev_err(adapter->dev, "firmware image not found!"
- " Terminating download\n");
+ dev_err(adapter->dev,
+ "firmware image not found! Terminating download\n");
return -1;
}
dev_dbg(adapter->dev, "info: downloading FW image (%d bytes)\n",
- firmware_len);
+ firmware_len);
/* Assume that the allocated buffer is 8-byte aligned */
fwbuf = kzalloc(MWIFIEX_UPLD_SIZE, GFP_KERNEL);
if (!fwbuf) {
- dev_err(adapter->dev, "unable to alloc buffer for firmware."
- " Terminating download\n");
+ dev_err(adapter->dev,
+ "unable to alloc buffer for FW. Terminating dnld\n");
return -ENOMEM;
}
@@ -731,7 +725,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
DN_LD_CARD_RDY);
if (ret) {
dev_err(adapter->dev, "FW download with helper:"
- " poll status timeout @ %d\n", offset);
+ " poll status timeout @ %d\n", offset);
goto done;
}
@@ -743,17 +737,19 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
ret = mwifiex_read_reg(adapter, HOST_F1_RD_BASE_0,
&base0);
if (ret) {
- dev_err(adapter->dev, "dev BASE0 register read"
- " failed: base0=0x%04X(%d). Terminating "
- "download\n", base0, base0);
+ dev_err(adapter->dev,
+ "dev BASE0 register read failed: "
+ "base0=%#04X(%d). Terminating dnld\n",
+ base0, base0);
goto done;
}
ret = mwifiex_read_reg(adapter, HOST_F1_RD_BASE_1,
&base1);
if (ret) {
- dev_err(adapter->dev, "dev BASE1 register read"
- " failed: base1=0x%04X(%d). Terminating "
- "download\n", base1, base1);
+ dev_err(adapter->dev,
+ "dev BASE1 register read failed: "
+ "base1=%#04X(%d). Terminating dnld\n",
+ base1, base1);
goto done;
}
len = (u16) (((base1 & 0xff) << 8) | (base0 & 0xff));
@@ -761,14 +757,15 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (len)
break;
- udelay(10);
+ usleep_range(10, 20);
}
if (!len) {
break;
} else if (len > MWIFIEX_UPLD_SIZE) {
- dev_err(adapter->dev, "FW download failed @ %d,"
- " invalid length %d\n", offset, len);
+ dev_err(adapter->dev,
+ "FW dnld failed @ %d, invalid length %d\n",
+ offset, len);
ret = -1;
goto done;
}
@@ -778,13 +775,14 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (len & BIT(0)) {
i++;
if (i > MAX_WRITE_IOMEM_RETRY) {
- dev_err(adapter->dev, "FW download failed @"
- " %d, over max retry count\n", offset);
+ dev_err(adapter->dev,
+ "FW dnld failed @ %d, over max retry\n",
+ offset);
ret = -1;
goto done;
}
dev_err(adapter->dev, "CRC indicated by the helper:"
- " len = 0x%04X, txlen = %d\n", len, txlen);
+ " len = 0x%04X, txlen = %d\n", len, txlen);
len &= ~BIT(0);
/* Setting this to 0 to resend from same offset */
txlen = 0;
@@ -796,8 +794,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (firmware_len - offset < txlen)
txlen = firmware_len - offset;
- tx_blocks = (txlen + MWIFIEX_SDIO_BLOCK_SIZE -
- 1) / MWIFIEX_SDIO_BLOCK_SIZE;
+ tx_blocks = (txlen + MWIFIEX_SDIO_BLOCK_SIZE - 1)
+ / MWIFIEX_SDIO_BLOCK_SIZE;
/* Copy payload to buffer */
memmove(fwbuf, &firmware[offset], txlen);
@@ -807,8 +805,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
MWIFIEX_SDIO_BLOCK_SIZE,
adapter->ioport);
if (ret) {
- dev_err(adapter->dev, "FW download, write iomem (%d)"
- " failed @ %d\n", i, offset);
+ dev_err(adapter->dev,
+ "FW download, write iomem (%d) failed @ %d\n",
+ i, offset);
if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04))
dev_err(adapter->dev, "write CFG reg failed\n");
@@ -820,7 +819,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
} while (true);
dev_dbg(adapter->dev, "info: FW download over, size %d bytes\n",
- offset);
+ offset);
ret = 0;
done:
@@ -912,7 +911,7 @@ mwifiex_sdio_interrupt(struct sdio_func *func)
card = sdio_get_drvdata(func);
if (!card || !card->adapter) {
pr_debug("int: func=%p card=%p adapter=%p\n",
- func, card, card ? card->adapter : NULL);
+ func, card, card ? card->adapter : NULL);
return;
}
adapter = card->adapter;
@@ -955,10 +954,12 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
if (adapter->ps_state == PS_STATE_SLEEP_CFM)
mwifiex_process_sleep_confirm_resp(adapter,
- skb->data, skb->len);
+ skb->data,
+ skb->len);
- memcpy(cmd_buf, skb->data, min_t(u32,
- MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
+ memcpy(cmd_buf, skb->data,
+ min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER,
+ skb->len));
dev_kfree_skb_any(skb);
} else {
@@ -1016,7 +1017,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
if (port == CTRL_PORT) {
/* Read the command Resp without aggr */
dev_dbg(adapter->dev, "info: %s: no aggregation for cmd "
- "response\n", __func__);
+ "response\n", __func__);
f_do_rx_cur = 1;
goto rx_curr_single;
@@ -1024,7 +1025,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
if (!card->mpa_rx.enabled) {
dev_dbg(adapter->dev, "info: %s: rx aggregation disabled\n",
- __func__);
+ __func__);
f_do_rx_cur = 1;
goto rx_curr_single;
@@ -1071,7 +1072,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) ||
MP_RX_AGGR_PORT_LIMIT_REACHED(card)) {
dev_dbg(adapter->dev, "info: %s: aggregated packet "
- "limit reached\n", __func__);
+ "limit reached\n", __func__);
/* No more pkts allowed in Aggr buf, rx it */
f_do_rx_aggr = 1;
}
@@ -1080,7 +1081,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
if (f_do_rx_aggr) {
/* do aggr RX now */
dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n",
- card->mpa_rx.pkt_cnt);
+ card->mpa_rx.pkt_cnt);
if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf,
card->mpa_rx.buf_len,
@@ -1194,7 +1195,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
card->mp_wr_bitmap = ((u16) card->mp_regs[WR_BITMAP_U]) << 8;
card->mp_wr_bitmap |= (u16) card->mp_regs[WR_BITMAP_L];
dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%04x\n",
- card->mp_wr_bitmap);
+ card->mp_wr_bitmap);
if (adapter->data_sent &&
(card->mp_wr_bitmap & card->mp_data_port_mask)) {
dev_dbg(adapter->dev,
@@ -1216,12 +1217,12 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
}
dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
- adapter->cmd_sent, adapter->data_sent);
+ adapter->cmd_sent, adapter->data_sent);
if (sdio_ireg & UP_LD_HOST_INT_STATUS) {
card->mp_rd_bitmap = ((u16) card->mp_regs[RD_BITMAP_U]) << 8;
card->mp_rd_bitmap |= (u16) card->mp_regs[RD_BITMAP_L];
dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%04x\n",
- card->mp_rd_bitmap);
+ card->mp_rd_bitmap);
while (true) {
ret = mwifiex_get_rd_port(adapter, &port);
@@ -1235,15 +1236,15 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
rx_len = ((u16) card->mp_regs[len_reg_u]) << 8;
rx_len |= (u16) card->mp_regs[len_reg_l];
dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n",
- port, rx_len);
+ port, rx_len);
rx_blocks =
(rx_len + MWIFIEX_SDIO_BLOCK_SIZE -
1) / MWIFIEX_SDIO_BLOCK_SIZE;
- if (rx_len <= INTF_HEADER_LEN
- || (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
- MWIFIEX_RX_DATA_BUF_SIZE) {
+ if (rx_len <= INTF_HEADER_LEN ||
+ (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
+ MWIFIEX_RX_DATA_BUF_SIZE) {
dev_err(adapter->dev, "invalid rx_len=%d\n",
- rx_len);
+ rx_len);
return -1;
}
rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
@@ -1252,42 +1253,42 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
if (!skb) {
dev_err(adapter->dev, "%s: failed to alloc skb",
- __func__);
+ __func__);
return -1;
}
skb_put(skb, rx_len);
dev_dbg(adapter->dev, "info: rx_len = %d skb->len = %d\n",
- rx_len, skb->len);
+ rx_len, skb->len);
if (mwifiex_sdio_card_to_host_mp_aggr(adapter, skb,
port)) {
u32 cr = 0;
dev_err(adapter->dev, "card_to_host_mpa failed:"
- " int status=%#x\n", sdio_ireg);
+ " int status=%#x\n", sdio_ireg);
if (mwifiex_read_reg(adapter,
CONFIGURATION_REG, &cr))
dev_err(adapter->dev,
- "read CFG reg failed\n");
+ "read CFG reg failed\n");
dev_dbg(adapter->dev,
- "info: CFG reg val = %d\n", cr);
+ "info: CFG reg val = %d\n", cr);
if (mwifiex_write_reg(adapter,
CONFIGURATION_REG,
(cr | 0x04)))
dev_err(adapter->dev,
- "write CFG reg failed\n");
+ "write CFG reg failed\n");
dev_dbg(adapter->dev, "info: write success\n");
if (mwifiex_read_reg(adapter,
CONFIGURATION_REG, &cr))
dev_err(adapter->dev,
- "read CFG reg failed\n");
+ "read CFG reg failed\n");
dev_dbg(adapter->dev,
- "info: CFG reg val =%x\n", cr);
+ "info: CFG reg val =%x\n", cr);
return -1;
}
}
@@ -1323,7 +1324,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
if ((!card->mpa_tx.enabled) || (port == CTRL_PORT)) {
dev_dbg(adapter->dev, "info: %s: tx aggregation disabled\n",
- __func__);
+ __func__);
f_send_cur_buf = 1;
goto tx_curr_single;
@@ -1332,7 +1333,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
if (next_pkt_len) {
/* More pkt in TX queue */
dev_dbg(adapter->dev, "info: %s: more packets in queue.\n",
- __func__);
+ __func__);
if (MP_TX_AGGR_IN_PROGRESS(card)) {
if (!MP_TX_AGGR_PORT_LIMIT_REACHED(card) &&
@@ -1340,9 +1341,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
f_precopy_cur_buf = 1;
if (!(card->mp_wr_bitmap &
- (1 << card->curr_wr_port))
- || !MP_TX_AGGR_BUF_HAS_ROOM(
- card, pkt_len + next_pkt_len))
+ (1 << card->curr_wr_port)) ||
+ !MP_TX_AGGR_BUF_HAS_ROOM(
+ card, pkt_len + next_pkt_len))
f_send_aggr_buf = 1;
} else {
/* No room in Aggr buf, send it */
@@ -1356,8 +1357,8 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
f_postcopy_cur_buf = 1;
}
} else {
- if (MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)
- && (card->mp_wr_bitmap & (1 << card->curr_wr_port)))
+ if (MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len) &&
+ (card->mp_wr_bitmap & (1 << card->curr_wr_port)))
f_precopy_cur_buf = 1;
else
f_send_cur_buf = 1;
@@ -1365,7 +1366,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
} else {
/* Last pkt in TX queue */
dev_dbg(adapter->dev, "info: %s: Last packet in Tx Queue.\n",
- __func__);
+ __func__);
if (MP_TX_AGGR_IN_PROGRESS(card)) {
/* some packs in Aggr buf already */
@@ -1383,7 +1384,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
if (f_precopy_cur_buf) {
dev_dbg(adapter->dev, "data: %s: precopy current buffer\n",
- __func__);
+ __func__);
MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
if (MP_TX_AGGR_PKT_LIMIT_REACHED(card) ||
@@ -1394,7 +1395,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
if (f_send_aggr_buf) {
dev_dbg(adapter->dev, "data: %s: send aggr buffer: %d %d\n",
- __func__,
+ __func__,
card->mpa_tx.start_port, card->mpa_tx.ports);
ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf,
card->mpa_tx.buf_len,
@@ -1408,14 +1409,14 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
tx_curr_single:
if (f_send_cur_buf) {
dev_dbg(adapter->dev, "data: %s: send current buffer %d\n",
- __func__, port);
+ __func__, port);
ret = mwifiex_write_data_to_card(adapter, payload, pkt_len,
adapter->ioport + port);
}
if (f_postcopy_cur_buf) {
dev_dbg(adapter->dev, "data: %s: postcopy current buffer\n",
- __func__);
+ __func__);
MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
}
@@ -1460,7 +1461,7 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
ret = mwifiex_get_wr_port_data(adapter, &port);
if (ret) {
dev_err(adapter->dev, "%s: no wr_port available\n",
- __func__);
+ __func__);
return ret;
}
} else {
@@ -1470,7 +1471,7 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
if (pkt_len <= INTF_HEADER_LEN ||
pkt_len > MWIFIEX_UPLD_SIZE)
dev_err(adapter->dev, "%s: payload=%p, nb=%d\n",
- __func__, payload, pkt_len);
+ __func__, payload, pkt_len);
}
/* Transfer data to card */
@@ -1478,10 +1479,11 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
if (tx_param)
ret = mwifiex_host_to_card_mp_aggr(adapter, payload, pkt_len,
- port, tx_param->next_pkt_len);
+ port, tx_param->next_pkt_len
+ );
else
ret = mwifiex_host_to_card_mp_aggr(adapter, payload, pkt_len,
- port, 0);
+ port, 0);
if (ret) {
if (type == MWIFIEX_TYPE_CMD)
@@ -1734,7 +1736,7 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
card->curr_wr_port = 1;
dev_dbg(adapter->dev, "cmd: mp_end_port %d, data port mask 0x%x\n",
- port, card->mp_data_port_mask);
+ port, card->mp_data_port_mask);
}
static struct mwifiex_if_ops sdio_ops = {
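Illustration (not from the patch): the sdio.c hunks are largely line-wrap and indentation cleanups; the functional tweaks are the udelay(10) -> usleep_range(10, 20) switch in the two polling loops and dropping the redundant allocation-failure printk in mwifiex_sdio_probe(). The block arithmetic those hunks keep re-wrapping is simply a round-up to whole SDIO blocks (sdio_tx_blocks is an illustrative name; 256 is the block size this driver normally uses):

#include <stdio.h>

#define MWIFIEX_SDIO_BLOCK_SIZE 256	/* assumed driver block size */

/* Mirrors the tx_blocks/rx_blocks computation in the firmware-download and
 * receive paths: round the transfer length up to whole SDIO blocks. */
static unsigned int sdio_tx_blocks(unsigned int len)
{
	return (len + MWIFIEX_SDIO_BLOCK_SIZE - 1) / MWIFIEX_SDIO_BLOCK_SIZE;
}

int main(void)
{
	printf("len=1   -> %u block(s)\n", sdio_tx_blocks(1));
	printf("len=256 -> %u block(s)\n", sdio_tx_blocks(256));
	printf("len=257 -> %u block(s)\n", sdio_tx_blocks(257));
	return 0;
}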
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 6e443ffa0465..6c8e4594b48b 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -103,76 +103,32 @@ static int mwifiex_cmd_mac_control(struct mwifiex_private *priv,
static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
u16 cmd_action, u32 cmd_oid,
- u32 *ul_temp)
+ u16 *ul_temp)
{
struct host_cmd_ds_802_11_snmp_mib *snmp_mib = &cmd->params.smib;
dev_dbg(priv->adapter->dev, "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid);
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SNMP_MIB);
cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_snmp_mib)
- - 1 + S_DS_GEN);
+ - 1 + S_DS_GEN);
+ snmp_mib->oid = cpu_to_le16((u16)cmd_oid);
if (cmd_action == HostCmd_ACT_GEN_GET) {
snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_GET);
snmp_mib->buf_size = cpu_to_le16(MAX_SNMP_BUF_SIZE);
- cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
- + MAX_SNMP_BUF_SIZE);
+ le16_add_cpu(&cmd->size, MAX_SNMP_BUF_SIZE);
+ } else if (cmd_action == HostCmd_ACT_GEN_SET) {
+ snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
+ snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
+ *((__le16 *) (snmp_mib->value)) = cpu_to_le16(*ul_temp);
+ le16_add_cpu(&cmd->size, sizeof(u16));
}
- switch (cmd_oid) {
- case FRAG_THRESH_I:
- snmp_mib->oid = cpu_to_le16((u16) FRAG_THRESH_I);
- if (cmd_action == HostCmd_ACT_GEN_SET) {
- snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
- snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
- *((__le16 *) (snmp_mib->value)) =
- cpu_to_le16((u16) *ul_temp);
- cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
- + sizeof(u16));
- }
- break;
- case RTS_THRESH_I:
- snmp_mib->oid = cpu_to_le16((u16) RTS_THRESH_I);
- if (cmd_action == HostCmd_ACT_GEN_SET) {
- snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
- snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
- *(__le16 *) (snmp_mib->value) =
- cpu_to_le16((u16) *ul_temp);
- cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
- + sizeof(u16));
- }
- break;
-
- case SHORT_RETRY_LIM_I:
- snmp_mib->oid = cpu_to_le16((u16) SHORT_RETRY_LIM_I);
- if (cmd_action == HostCmd_ACT_GEN_SET) {
- snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
- snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
- *((__le16 *) (snmp_mib->value)) =
- cpu_to_le16((u16) *ul_temp);
- cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
- + sizeof(u16));
- }
- break;
- case DOT11D_I:
- snmp_mib->oid = cpu_to_le16((u16) DOT11D_I);
- if (cmd_action == HostCmd_ACT_GEN_SET) {
- snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
- snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
- *((__le16 *) (snmp_mib->value)) =
- cpu_to_le16((u16) *ul_temp);
- cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
- + sizeof(u16));
- }
- break;
- default:
- break;
- }
dev_dbg(priv->adapter->dev,
"cmd: SNMP_CMD: Action=0x%x, OID=0x%x, OIDSize=0x%x,"
" Value=0x%x\n",
- cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size),
- le16_to_cpu(*(__le16 *) snmp_mib->value));
+ cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size),
+ le16_to_cpu(*(__le16 *) snmp_mib->value));
return 0;
}
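Illustration (not from the patch): the hunk above collapses the per-OID switch in mwifiex_cmd_802_11_snmp_mib() into one generic path -- the OID is written once up front and the SET case appends a u16 value -- and grows cmd->size with le16_add_cpu() rather than the cpu_to_le16(le16_to_cpu(...) + ...) round trip. What that helper does, as a userspace sketch (the stand-in typedef ignores byte order and assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>

typedef uint16_t le16;	/* stand-in for the kernel's __le16 */

static uint16_t le16_to_cpu(le16 v) { return v; }	/* no-ops on little-endian */
static le16 cpu_to_le16(uint16_t v) { return v; }

/* Same effect as the kernel helper: add a CPU-order value to a field stored
 * little-endian, keeping the stored value little-endian. */
static void le16_add_cpu(le16 *var, uint16_t val)
{
	*var = cpu_to_le16(le16_to_cpu(*var) + val);
}

int main(void)
{
	le16 size = cpu_to_le16(100);	/* base command size */
	le16_add_cpu(&size, 16);	/* append 16 bytes of payload */
	printf("cmd size is now %u\n", le16_to_cpu(size));
	return 0;
}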
@@ -218,8 +174,8 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
rate_scope = (struct mwifiex_rate_scope *) ((u8 *) rate_cfg +
sizeof(struct host_cmd_ds_tx_rate_cfg));
rate_scope->type = cpu_to_le16(TLV_TYPE_RATE_SCOPE);
- rate_scope->length = cpu_to_le16(sizeof(struct mwifiex_rate_scope) -
- sizeof(struct mwifiex_ie_types_header));
+ rate_scope->length = cpu_to_le16
+ (sizeof(*rate_scope) - sizeof(struct mwifiex_ie_types_header));
if (pbitmap_rates != NULL) {
rate_scope->hr_dsss_rate_bitmap = cpu_to_le16(pbitmap_rates[0]);
rate_scope->ofdm_rate_bitmap = cpu_to_le16(pbitmap_rates[1]);
@@ -241,7 +197,7 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
}
rate_drop = (struct mwifiex_rate_drop_pattern *) ((u8 *) rate_scope +
- sizeof(struct mwifiex_rate_scope));
+ sizeof(struct mwifiex_rate_scope));
rate_drop->type = cpu_to_le16(TLV_TYPE_RATE_DROP_CONTROL);
rate_drop->length = cpu_to_le16(sizeof(rate_drop->rate_drop_mode));
rate_drop->rate_drop_mode = 0;
@@ -328,22 +284,22 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH);
if (!hs_activate &&
- (hscfg_param->conditions
- != cpu_to_le32(HOST_SLEEP_CFG_CANCEL))
- && ((adapter->arp_filter_size > 0)
- && (adapter->arp_filter_size <= ARP_FILTER_MAX_BUF_SIZE))) {
+ (hscfg_param->conditions != cpu_to_le32(HOST_SLEEP_CFG_CANCEL)) &&
+ ((adapter->arp_filter_size > 0) &&
+ (adapter->arp_filter_size <= ARP_FILTER_MAX_BUF_SIZE))) {
dev_dbg(adapter->dev,
"cmd: Attach %d bytes ArpFilter to HSCfg cmd\n",
- adapter->arp_filter_size);
+ adapter->arp_filter_size);
memcpy(((u8 *) hs_cfg) +
sizeof(struct host_cmd_ds_802_11_hs_cfg_enh),
adapter->arp_filter, adapter->arp_filter_size);
- cmd->size = cpu_to_le16(adapter->arp_filter_size +
- sizeof(struct host_cmd_ds_802_11_hs_cfg_enh)
- + S_DS_GEN);
+ cmd->size = cpu_to_le16
+ (adapter->arp_filter_size +
+ sizeof(struct host_cmd_ds_802_11_hs_cfg_enh)
+ + S_DS_GEN);
} else {
cmd->size = cpu_to_le16(S_DS_GEN + sizeof(struct
- host_cmd_ds_802_11_hs_cfg_enh));
+ host_cmd_ds_802_11_hs_cfg_enh));
}
if (hs_activate) {
hs_cfg->action = cpu_to_le16(HS_ACTIVATE);
@@ -511,7 +467,7 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
key_param_set =
(struct mwifiex_ie_type_key_param_set *)
((u8 *)key_param_set +
- cur_key_param_len);
+ cur_key_param_len);
} else if (!priv->wep_key[i].key_length) {
continue;
} else {
@@ -571,13 +527,13 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
if (enc_key->is_wapi_key) {
dev_dbg(priv->adapter->dev, "info: Set WAPI Key\n");
key_material->key_param_set.key_type_id =
- cpu_to_le16(KEY_TYPE_ID_WAPI);
+ cpu_to_le16(KEY_TYPE_ID_WAPI);
if (cmd_oid == KEY_INFO_ENABLED)
key_material->key_param_set.key_info =
- cpu_to_le16(KEY_ENABLED);
+ cpu_to_le16(KEY_ENABLED);
else
key_material->key_param_set.key_info =
- cpu_to_le16(!KEY_ENABLED);
+ cpu_to_le16(!KEY_ENABLED);
key_material->key_param_set.key[0] = enc_key->key_index;
if (!priv->sec_info.wapi_key_on)
@@ -597,9 +553,9 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
}
key_material->key_param_set.type =
- cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
+ cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
key_material->key_param_set.key_len =
- cpu_to_le16(WAPI_KEY_LEN);
+ cpu_to_le16(WAPI_KEY_LEN);
memcpy(&key_material->key_param_set.key[2],
enc_key->key_material, enc_key->key_len);
memcpy(&key_material->key_param_set.key[2 + enc_key->key_len],
@@ -609,49 +565,49 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
key_param_len = (WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN) +
sizeof(struct mwifiex_ie_types_header);
- cmd->size = cpu_to_le16(key_param_len +
- sizeof(key_material->action) + S_DS_GEN);
+ cmd->size = cpu_to_le16(sizeof(key_material->action)
+ + S_DS_GEN + key_param_len);
return ret;
}
if (enc_key->key_len == WLAN_KEY_LEN_CCMP) {
dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
key_material->key_param_set.key_type_id =
- cpu_to_le16(KEY_TYPE_ID_AES);
+ cpu_to_le16(KEY_TYPE_ID_AES);
if (cmd_oid == KEY_INFO_ENABLED)
key_material->key_param_set.key_info =
- cpu_to_le16(KEY_ENABLED);
+ cpu_to_le16(KEY_ENABLED);
else
key_material->key_param_set.key_info =
- cpu_to_le16(!KEY_ENABLED);
+ cpu_to_le16(!KEY_ENABLED);
if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
/* AES pairwise key: unicast */
key_material->key_param_set.key_info |=
- cpu_to_le16(KEY_UNICAST);
+ cpu_to_le16(KEY_UNICAST);
else /* AES group key: multicast */
key_material->key_param_set.key_info |=
- cpu_to_le16(KEY_MCAST);
+ cpu_to_le16(KEY_MCAST);
} else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n");
key_material->key_param_set.key_type_id =
- cpu_to_le16(KEY_TYPE_ID_TKIP);
+ cpu_to_le16(KEY_TYPE_ID_TKIP);
key_material->key_param_set.key_info =
- cpu_to_le16(KEY_ENABLED);
+ cpu_to_le16(KEY_ENABLED);
if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
/* TKIP pairwise key: unicast */
key_material->key_param_set.key_info |=
- cpu_to_le16(KEY_UNICAST);
+ cpu_to_le16(KEY_UNICAST);
else /* TKIP group key: multicast */
key_material->key_param_set.key_info |=
- cpu_to_le16(KEY_MCAST);
+ cpu_to_le16(KEY_MCAST);
}
if (key_material->key_param_set.key_type_id) {
key_material->key_param_set.type =
- cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
+ cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
key_material->key_param_set.key_len =
- cpu_to_le16((u16) enc_key->key_len);
+ cpu_to_le16((u16) enc_key->key_len);
memcpy(key_material->key_param_set.key, enc_key->key_material,
enc_key->key_len);
key_material->key_param_set.length =
@@ -659,10 +615,10 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
KEYPARAMSET_FIXED_LEN);
key_param_len = (u16) (enc_key->key_len + KEYPARAMSET_FIXED_LEN)
- + sizeof(struct mwifiex_ie_types_header);
+ + sizeof(struct mwifiex_ie_types_header);
- cmd->size = cpu_to_le16(key_param_len +
- sizeof(key_material->action) + S_DS_GEN);
+ cmd->size = cpu_to_le16(sizeof(key_material->action) + S_DS_GEN
+ + key_param_len);
}
return ret;
@@ -699,21 +655,22 @@ static int mwifiex_cmd_802_11d_domain_info(struct mwifiex_private *priv,
/* Set domain info fields */
domain->header.type = cpu_to_le16(WLAN_EID_COUNTRY);
memcpy(domain->country_code, adapter->domain_reg.country_code,
- sizeof(domain->country_code));
+ sizeof(domain->country_code));
- domain->header.len = cpu_to_le16((no_of_triplet *
- sizeof(struct ieee80211_country_ie_triplet)) +
- sizeof(domain->country_code));
+ domain->header.len =
+ cpu_to_le16((no_of_triplet *
+ sizeof(struct ieee80211_country_ie_triplet))
+ + sizeof(domain->country_code));
if (no_of_triplet) {
memcpy(domain->triplet, adapter->domain_reg.triplet,
- no_of_triplet *
- sizeof(struct ieee80211_country_ie_triplet));
+ no_of_triplet * sizeof(struct
+ ieee80211_country_ie_triplet));
cmd->size = cpu_to_le16(sizeof(domain_info->action) +
- le16_to_cpu(domain->header.len) +
- sizeof(struct mwifiex_ie_types_header)
- + S_DS_GEN);
+ le16_to_cpu(domain->header.len) +
+ sizeof(struct mwifiex_ie_types_header)
+ + S_DS_GEN);
} else {
cmd->size = cpu_to_le16(sizeof(domain_info->action) + S_DS_GEN);
}
@@ -742,8 +699,8 @@ static int mwifiex_cmd_802_11_rf_channel(struct mwifiex_private *priv,
+ S_DS_GEN);
if (cmd_action == HostCmd_ACT_GEN_SET) {
- if ((priv->adapter->adhoc_start_band & BAND_A)
- || (priv->adapter->adhoc_start_band & BAND_AN))
+ if ((priv->adapter->adhoc_start_band & BAND_A) ||
+ (priv->adapter->adhoc_start_band & BAND_AN))
rf_chan->rf_type =
cpu_to_le16(HostCmd_SCAN_RADIO_TYPE_A);
@@ -821,7 +778,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
cmd->size = cpu_to_le16(sizeof(*mac_reg) + S_DS_GEN);
mac_reg = (struct host_cmd_ds_mac_reg_access *) &cmd->
- params.mac_reg;
+ params.mac_reg;
mac_reg->action = cpu_to_le16(cmd_action);
mac_reg->offset =
cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -833,8 +790,8 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_bbp_reg_access *bbp_reg;
cmd->size = cpu_to_le16(sizeof(*bbp_reg) + S_DS_GEN);
- bbp_reg = (struct host_cmd_ds_bbp_reg_access *) &cmd->
- params.bbp_reg;
+ bbp_reg = (struct host_cmd_ds_bbp_reg_access *)
+ &cmd->params.bbp_reg;
bbp_reg->action = cpu_to_le16(cmd_action);
bbp_reg->offset =
cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -846,11 +803,10 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_rf_reg_access *rf_reg;
cmd->size = cpu_to_le16(sizeof(*rf_reg) + S_DS_GEN);
- rf_reg = (struct host_cmd_ds_rf_reg_access *) &cmd->
- params.rf_reg;
+ rf_reg = (struct host_cmd_ds_rf_reg_access *)
+ &cmd->params.rf_reg;
rf_reg->action = cpu_to_le16(cmd_action);
- rf_reg->offset =
- cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
+ rf_reg->offset = cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
rf_reg->value = (u8) le32_to_cpu(reg_rw->value);
break;
}
@@ -863,7 +819,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
params.pmic_reg;
pmic_reg->action = cpu_to_le16(cmd_action);
pmic_reg->offset =
- cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
+ cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
pmic_reg->value = (u8) le32_to_cpu(reg_rw->value);
break;
}
@@ -872,11 +828,11 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_rf_reg_access *cau_reg;
cmd->size = cpu_to_le16(sizeof(*cau_reg) + S_DS_GEN);
- cau_reg = (struct host_cmd_ds_rf_reg_access *) &cmd->
- params.rf_reg;
+ cau_reg = (struct host_cmd_ds_rf_reg_access *)
+ &cmd->params.rf_reg;
cau_reg->action = cpu_to_le16(cmd_action);
cau_reg->offset =
- cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
+ cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
cau_reg->value = (u8) le32_to_cpu(reg_rw->value);
break;
}
@@ -912,7 +868,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
*/
static int
mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv,
- struct host_cmd_ds_command *cmd, u16 action)
+ struct host_cmd_ds_command *cmd, u16 action)
{
struct host_cmd_ds_pcie_details *host_spec =
&cmd->params.pcie_host_spec;
@@ -926,29 +882,25 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv,
memset(host_spec, 0, sizeof(struct host_cmd_ds_pcie_details));
- if (action == HostCmd_ACT_GEN_SET) {
- /* Send the ring base addresses and count to firmware */
- host_spec->txbd_addr_lo = (u32)(card->txbd_ring_pbase);
- host_spec->txbd_addr_hi =
- (u32)(((u64)card->txbd_ring_pbase)>>32);
- host_spec->txbd_count = MWIFIEX_MAX_TXRX_BD;
- host_spec->rxbd_addr_lo = (u32)(card->rxbd_ring_pbase);
- host_spec->rxbd_addr_hi =
- (u32)(((u64)card->rxbd_ring_pbase)>>32);
- host_spec->rxbd_count = MWIFIEX_MAX_TXRX_BD;
- host_spec->evtbd_addr_lo =
- (u32)(card->evtbd_ring_pbase);
- host_spec->evtbd_addr_hi =
- (u32)(((u64)card->evtbd_ring_pbase)>>32);
- host_spec->evtbd_count = MWIFIEX_MAX_EVT_BD;
- if (card->sleep_cookie) {
- buf_pa = MWIFIEX_SKB_PACB(card->sleep_cookie);
- host_spec->sleep_cookie_addr_lo = (u32) *buf_pa;
- host_spec->sleep_cookie_addr_hi =
- (u32) (((u64)*buf_pa) >> 32);
- dev_dbg(priv->adapter->dev, "sleep_cook_lo phy addr: "
- "0x%x\n", host_spec->sleep_cookie_addr_lo);
- }
+ if (action != HostCmd_ACT_GEN_SET)
+ return 0;
+
+ /* Send the ring base addresses and count to firmware */
+ host_spec->txbd_addr_lo = (u32)(card->txbd_ring_pbase);
+ host_spec->txbd_addr_hi = (u32)(((u64)card->txbd_ring_pbase)>>32);
+ host_spec->txbd_count = MWIFIEX_MAX_TXRX_BD;
+ host_spec->rxbd_addr_lo = (u32)(card->rxbd_ring_pbase);
+ host_spec->rxbd_addr_hi = (u32)(((u64)card->rxbd_ring_pbase)>>32);
+ host_spec->rxbd_count = MWIFIEX_MAX_TXRX_BD;
+ host_spec->evtbd_addr_lo = (u32)(card->evtbd_ring_pbase);
+ host_spec->evtbd_addr_hi = (u32)(((u64)card->evtbd_ring_pbase)>>32);
+ host_spec->evtbd_count = MWIFIEX_MAX_EVT_BD;
+ if (card->sleep_cookie) {
+ buf_pa = MWIFIEX_SKB_PACB(card->sleep_cookie);
+ host_spec->sleep_cookie_addr_lo = (u32) *buf_pa;
+ host_spec->sleep_cookie_addr_hi = (u32) (((u64)*buf_pa) >> 32);
+ dev_dbg(priv->adapter->dev, "sleep_cook_lo phy addr: 0x%x\n",
+ host_spec->sleep_cookie_addr_lo);
}
return 0;
@@ -1080,12 +1032,12 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
break;
case HostCmd_CMD_802_11_KEY_MATERIAL:
ret = mwifiex_cmd_802_11_key_material(priv, cmd_ptr,
- cmd_action, cmd_oid,
- data_buf);
+ cmd_action, cmd_oid,
+ data_buf);
break;
case HostCmd_CMD_802_11D_DOMAIN_INFO:
ret = mwifiex_cmd_802_11d_domain_info(priv, cmd_ptr,
- cmd_action);
+ cmd_action);
break;
case HostCmd_CMD_RECONFIGURE_TX_BUFF:
ret = mwifiex_cmd_recfg_tx_buf(priv, cmd_ptr, cmd_action,
@@ -1096,8 +1048,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
data_buf);
break;
case HostCmd_CMD_11N_CFG:
- ret = mwifiex_cmd_11n_cfg(cmd_ptr, cmd_action,
- data_buf);
+ ret = mwifiex_cmd_11n_cfg(cmd_ptr, cmd_action, data_buf);
break;
case HostCmd_CMD_WMM_GET_STATUS:
dev_dbg(priv->adapter->dev,
@@ -1175,8 +1126,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (first_sta) {
if (priv->adapter->iface_type == MWIFIEX_PCIE) {
ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_PCIE_DESC_DETAILS,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ HostCmd_CMD_PCIE_DESC_DETAILS,
+ HostCmd_ACT_GEN_SET, 0, NULL);
if (ret)
return -1;
}
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index e812db8b695c..4da19ed0f078 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -49,7 +49,7 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
unsigned long flags;
dev_err(adapter->dev, "CMD_RESP: cmd %#x error, result=%#x\n",
- resp->command, resp->result);
+ resp->command, resp->result);
if (adapter->curr_cmd->wait_q_enabled)
adapter->cmd_wait_q.status = -1;
@@ -57,13 +57,13 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
switch (le16_to_cpu(resp->command)) {
case HostCmd_CMD_802_11_PS_MODE_ENH:
pm = &resp->params.psmode_enh;
- dev_err(adapter->dev, "PS_MODE_ENH cmd failed: "
- "result=0x%x action=0x%X\n",
- resp->result, le16_to_cpu(pm->action));
+ dev_err(adapter->dev,
+ "PS_MODE_ENH cmd failed: result=0x%x action=0x%X\n",
+ resp->result, le16_to_cpu(pm->action));
/* We do not re-try enter-ps command in ad-hoc mode. */
if (le16_to_cpu(pm->action) == EN_AUTO_PS &&
- (le16_to_cpu(pm->params.ps_bitmap) & BITMAP_STA_PS) &&
- priv->bss_mode == NL80211_IFTYPE_ADHOC)
+ (le16_to_cpu(pm->params.ps_bitmap) & BITMAP_STA_PS) &&
+ priv->bss_mode == NL80211_IFTYPE_ADHOC)
adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
break;
@@ -123,7 +123,7 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
struct mwifiex_ds_get_signal *signal)
{
struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp =
- &resp->params.rssi_info_rsp;
+ &resp->params.rssi_info_rsp;
priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last);
priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last);
@@ -191,8 +191,8 @@ static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv,
u32 ul_temp;
dev_dbg(priv->adapter->dev, "info: SNMP_RESP: oid value = %#x,"
- " query_type = %#x, buf size = %#x\n",
- oid, query_type, le16_to_cpu(smib->buf_size));
+ " query_type = %#x, buf size = %#x\n",
+ oid, query_type, le16_to_cpu(smib->buf_size));
if (query_type == HostCmd_ACT_GEN_GET) {
ul_temp = le16_to_cpu(*((__le16 *) (smib->value)));
if (data_buf)
@@ -210,6 +210,9 @@ static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv,
dev_dbg(priv->adapter->dev,
"info: SNMP_RESP: TxRetryCount=%u\n", ul_temp);
break;
+ case DTIM_PERIOD_I:
+ dev_dbg(priv->adapter->dev,
+ "info: SNMP_RESP: DTIM period=%u\n", ul_temp);
default:
break;
}
@@ -324,31 +327,26 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
HostCmd_CMD_802_11_TX_RATE_QUERY,
HostCmd_ACT_GEN_GET, 0, NULL);
- if (ds_rate) {
- if (le16_to_cpu(rate_cfg->action) == HostCmd_ACT_GEN_GET) {
- if (priv->is_data_rate_auto) {
- ds_rate->is_rate_auto = 1;
- } else {
- ds_rate->rate = mwifiex_get_rate_index(priv->
- bitmap_rates,
- sizeof(priv->
- bitmap_rates));
- if (ds_rate->rate >=
- MWIFIEX_RATE_BITMAP_OFDM0
- && ds_rate->rate <=
- MWIFIEX_RATE_BITMAP_OFDM7)
- ds_rate->rate -=
- (MWIFIEX_RATE_BITMAP_OFDM0 -
- MWIFIEX_RATE_INDEX_OFDM0);
- if (ds_rate->rate >=
- MWIFIEX_RATE_BITMAP_MCS0
- && ds_rate->rate <=
- MWIFIEX_RATE_BITMAP_MCS127)
- ds_rate->rate -=
- (MWIFIEX_RATE_BITMAP_MCS0 -
- MWIFIEX_RATE_INDEX_MCS0);
- }
- }
+ if (!ds_rate)
+ return ret;
+
+ if (le16_to_cpu(rate_cfg->action) == HostCmd_ACT_GEN_GET) {
+ if (priv->is_data_rate_auto) {
+ ds_rate->is_rate_auto = 1;
+ return ret;
+ }
+ ds_rate->rate = mwifiex_get_rate_index(priv->bitmap_rates,
+ sizeof(priv->bitmap_rates));
+
+ if (ds_rate->rate >= MWIFIEX_RATE_BITMAP_OFDM0 &&
+ ds_rate->rate <= MWIFIEX_RATE_BITMAP_OFDM7)
+ ds_rate->rate -= (MWIFIEX_RATE_BITMAP_OFDM0 -
+ MWIFIEX_RATE_INDEX_OFDM0);
+
+ if (ds_rate->rate >= MWIFIEX_RATE_BITMAP_MCS0 &&
+ ds_rate->rate <= MWIFIEX_RATE_BITMAP_MCS127)
+ ds_rate->rate -= (MWIFIEX_RATE_BITMAP_MCS0 -
+ MWIFIEX_RATE_INDEX_MCS0);
}
return ret;
@@ -366,34 +364,32 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf)
struct mwifiex_types_power_group *pg_tlv_hdr;
struct mwifiex_power_group *pg;
- if (data_buf) {
- pg_tlv_hdr =
- (struct mwifiex_types_power_group *) ((u8 *) data_buf
- + sizeof(struct host_cmd_ds_txpwr_cfg));
- pg = (struct mwifiex_power_group *) ((u8 *) pg_tlv_hdr +
- sizeof(struct mwifiex_types_power_group));
- length = pg_tlv_hdr->length;
- if (length > 0) {
+ if (!data_buf)
+ return -1;
+
+ pg_tlv_hdr = (struct mwifiex_types_power_group *)
+ ((u8 *) data_buf + sizeof(struct host_cmd_ds_txpwr_cfg));
+ pg = (struct mwifiex_power_group *)
+ ((u8 *) pg_tlv_hdr + sizeof(struct mwifiex_types_power_group));
+ length = pg_tlv_hdr->length;
+ if (length > 0) {
+ max_power = pg->power_max;
+ min_power = pg->power_min;
+ length -= sizeof(struct mwifiex_power_group);
+ }
+ while (length) {
+ pg++;
+ if (max_power < pg->power_max)
max_power = pg->power_max;
- min_power = pg->power_min;
- length -= sizeof(struct mwifiex_power_group);
- }
- while (length) {
- pg++;
- if (max_power < pg->power_max)
- max_power = pg->power_max;
- if (min_power > pg->power_min)
- min_power = pg->power_min;
+ if (min_power > pg->power_min)
+ min_power = pg->power_min;
- length -= sizeof(struct mwifiex_power_group);
- }
- if (pg_tlv_hdr->length > 0) {
- priv->min_tx_power_level = (u8) min_power;
- priv->max_tx_power_level = (u8) max_power;
- }
- } else {
- return -1;
+ length -= sizeof(struct mwifiex_power_group);
+ }
+ if (pg_tlv_hdr->length > 0) {
+ priv->min_tx_power_level = (u8) min_power;
+ priv->max_tx_power_level = (u8) max_power;
}
return 0;
@@ -417,42 +413,38 @@ static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
switch (action) {
case HostCmd_ACT_GEN_GET:
- {
- pg_tlv_hdr =
- (struct mwifiex_types_power_group *) ((u8 *)
- txp_cfg +
- sizeof
- (struct
- host_cmd_ds_txpwr_cfg));
- pg = (struct mwifiex_power_group *) ((u8 *)
- pg_tlv_hdr +
- sizeof(struct
- mwifiex_types_power_group));
- if (adapter->hw_status ==
- MWIFIEX_HW_STATUS_INITIALIZING)
- mwifiex_get_power_level(priv, txp_cfg);
- priv->tx_power_level = (u16) pg->power_min;
- break;
- }
+ pg_tlv_hdr = (struct mwifiex_types_power_group *)
+ ((u8 *) txp_cfg +
+ sizeof(struct host_cmd_ds_txpwr_cfg));
+
+ pg = (struct mwifiex_power_group *)
+ ((u8 *) pg_tlv_hdr +
+ sizeof(struct mwifiex_types_power_group));
+
+ if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
+ mwifiex_get_power_level(priv, txp_cfg);
+
+ priv->tx_power_level = (u16) pg->power_min;
+ break;
+
case HostCmd_ACT_GEN_SET:
- if (le32_to_cpu(txp_cfg->mode)) {
- pg_tlv_hdr =
- (struct mwifiex_types_power_group *) ((u8 *)
- txp_cfg +
- sizeof
- (struct
- host_cmd_ds_txpwr_cfg));
- pg = (struct mwifiex_power_group *) ((u8 *) pg_tlv_hdr
- +
- sizeof(struct
- mwifiex_types_power_group));
- if (pg->power_max == pg->power_min)
- priv->tx_power_level = (u16) pg->power_min;
- }
+ if (!le32_to_cpu(txp_cfg->mode))
+ break;
+
+ pg_tlv_hdr = (struct mwifiex_types_power_group *)
+ ((u8 *) txp_cfg +
+ sizeof(struct host_cmd_ds_txpwr_cfg));
+
+ pg = (struct mwifiex_power_group *)
+ ((u8 *) pg_tlv_hdr +
+ sizeof(struct mwifiex_types_power_group));
+
+ if (pg->power_max == pg->power_min)
+ priv->tx_power_level = (u16) pg->power_min;
break;
default:
dev_err(adapter->dev, "CMD_RESP: unknown cmd action %d\n",
- action);
+ action);
return 0;
}
dev_dbg(adapter->dev,
@@ -472,7 +464,7 @@ static int mwifiex_ret_802_11_mac_address(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
struct host_cmd_ds_802_11_mac_address *cmd_mac_addr =
- &resp->params.mac_addr;
+ &resp->params.mac_addr;
memcpy(priv->curr_addr, cmd_mac_addr->mac_addr, ETH_ALEN);
@@ -557,7 +549,7 @@ static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
struct host_cmd_ds_802_11_key_material *key =
- &resp->params.key_material;
+ &resp->params.key_material;
if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) {
if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) {
@@ -588,17 +580,18 @@ static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
u16 action = le16_to_cpu(domain_info->action);
u8 no_of_triplet;
- no_of_triplet = (u8) ((le16_to_cpu(domain->header.len) -
- IEEE80211_COUNTRY_STRING_LEN) /
- sizeof(struct ieee80211_country_ie_triplet));
+ no_of_triplet = (u8) ((le16_to_cpu(domain->header.len)
+ - IEEE80211_COUNTRY_STRING_LEN)
+ / sizeof(struct ieee80211_country_ie_triplet));
- dev_dbg(priv->adapter->dev, "info: 11D Domain Info Resp:"
- " no_of_triplet=%d\n", no_of_triplet);
+ dev_dbg(priv->adapter->dev,
+ "info: 11D Domain Info Resp: no_of_triplet=%d\n",
+ no_of_triplet);
if (no_of_triplet > MWIFIEX_MAX_TRIPLET_802_11D) {
dev_warn(priv->adapter->dev,
- "11D: invalid number of triplets %d "
- "returned!!\n", no_of_triplet);
+ "11D: invalid number of triplets %d returned\n",
+ no_of_triplet);
return -1;
}
@@ -632,8 +625,8 @@ static int mwifiex_ret_802_11_rf_channel(struct mwifiex_private *priv,
if (priv->curr_bss_params.bss_descriptor.channel != new_channel) {
dev_dbg(priv->adapter->dev, "cmd: Channel Switch: %d to %d\n",
- priv->curr_bss_params.bss_descriptor.channel,
- new_channel);
+ priv->curr_bss_params.bss_descriptor.channel,
+ new_channel);
/* Update the channel again */
priv->curr_bss_params.bss_descriptor.channel = new_channel;
}
@@ -676,90 +669,70 @@ static int mwifiex_ret_reg_access(u16 type, struct host_cmd_ds_command *resp,
{
struct mwifiex_ds_reg_rw *reg_rw;
struct mwifiex_ds_read_eeprom *eeprom;
+ union reg {
+ struct host_cmd_ds_mac_reg_access *mac;
+ struct host_cmd_ds_bbp_reg_access *bbp;
+ struct host_cmd_ds_rf_reg_access *rf;
+ struct host_cmd_ds_pmic_reg_access *pmic;
+ struct host_cmd_ds_802_11_eeprom_access *eeprom;
+ } r;
+
+ if (!data_buf)
+ return 0;
- if (data_buf) {
- reg_rw = data_buf;
- eeprom = data_buf;
- switch (type) {
- case HostCmd_CMD_MAC_REG_ACCESS:
- {
- struct host_cmd_ds_mac_reg_access *reg;
- reg = (struct host_cmd_ds_mac_reg_access *)
- &resp->params.mac_reg;
- reg_rw->offset = cpu_to_le32(
- (u32) le16_to_cpu(reg->offset));
- reg_rw->value = reg->value;
- break;
- }
- case HostCmd_CMD_BBP_REG_ACCESS:
- {
- struct host_cmd_ds_bbp_reg_access *reg;
- reg = (struct host_cmd_ds_bbp_reg_access *)
- &resp->params.bbp_reg;
- reg_rw->offset = cpu_to_le32(
- (u32) le16_to_cpu(reg->offset));
- reg_rw->value = cpu_to_le32((u32) reg->value);
- break;
- }
-
- case HostCmd_CMD_RF_REG_ACCESS:
- {
- struct host_cmd_ds_rf_reg_access *reg;
- reg = (struct host_cmd_ds_rf_reg_access *)
- &resp->params.rf_reg;
- reg_rw->offset = cpu_to_le32(
- (u32) le16_to_cpu(reg->offset));
- reg_rw->value = cpu_to_le32((u32) reg->value);
- break;
- }
- case HostCmd_CMD_PMIC_REG_ACCESS:
- {
- struct host_cmd_ds_pmic_reg_access *reg;
- reg = (struct host_cmd_ds_pmic_reg_access *)
- &resp->params.pmic_reg;
- reg_rw->offset = cpu_to_le32(
- (u32) le16_to_cpu(reg->offset));
- reg_rw->value = cpu_to_le32((u32) reg->value);
- break;
- }
- case HostCmd_CMD_CAU_REG_ACCESS:
- {
- struct host_cmd_ds_rf_reg_access *reg;
- reg = (struct host_cmd_ds_rf_reg_access *)
- &resp->params.rf_reg;
- reg_rw->offset = cpu_to_le32(
- (u32) le16_to_cpu(reg->offset));
- reg_rw->value = cpu_to_le32((u32) reg->value);
- break;
- }
- case HostCmd_CMD_802_11_EEPROM_ACCESS:
- {
- struct host_cmd_ds_802_11_eeprom_access
- *cmd_eeprom =
- (struct host_cmd_ds_802_11_eeprom_access
- *) &resp->params.eeprom;
- pr_debug("info: EEPROM read len=%x\n",
- cmd_eeprom->byte_count);
- if (le16_to_cpu(eeprom->byte_count) <
- le16_to_cpu(
- cmd_eeprom->byte_count)) {
- eeprom->byte_count = cpu_to_le16(0);
- pr_debug("info: EEPROM read "
- "length is too big\n");
- return -1;
- }
- eeprom->offset = cmd_eeprom->offset;
- eeprom->byte_count = cmd_eeprom->byte_count;
- if (le16_to_cpu(eeprom->byte_count) > 0)
- memcpy(&eeprom->value,
- &cmd_eeprom->value,
- le16_to_cpu(eeprom->byte_count));
-
- break;
- }
- default:
+ reg_rw = data_buf;
+ eeprom = data_buf;
+ switch (type) {
+ case HostCmd_CMD_MAC_REG_ACCESS:
+ r.mac = (struct host_cmd_ds_mac_reg_access *)
+ &resp->params.mac_reg;
+ reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.mac->offset));
+ reg_rw->value = r.mac->value;
+ break;
+ case HostCmd_CMD_BBP_REG_ACCESS:
+ r.bbp = (struct host_cmd_ds_bbp_reg_access *)
+ &resp->params.bbp_reg;
+ reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.bbp->offset));
+ reg_rw->value = cpu_to_le32((u32) r.bbp->value);
+ break;
+
+ case HostCmd_CMD_RF_REG_ACCESS:
+ r.rf = (struct host_cmd_ds_rf_reg_access *)
+ &resp->params.rf_reg;
+ reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset));
+ reg_rw->value = cpu_to_le32((u32) r.rf->value);
+ break;
+ case HostCmd_CMD_PMIC_REG_ACCESS:
+ r.pmic = (struct host_cmd_ds_pmic_reg_access *)
+ &resp->params.pmic_reg;
+ reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.pmic->offset));
+ reg_rw->value = cpu_to_le32((u32) r.pmic->value);
+ break;
+ case HostCmd_CMD_CAU_REG_ACCESS:
+ r.rf = (struct host_cmd_ds_rf_reg_access *)
+ &resp->params.rf_reg;
+ reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset));
+ reg_rw->value = cpu_to_le32((u32) r.rf->value);
+ break;
+ case HostCmd_CMD_802_11_EEPROM_ACCESS:
+ r.eeprom = (struct host_cmd_ds_802_11_eeprom_access *)
+ &resp->params.eeprom;
+ pr_debug("info: EEPROM read len=%x\n", r.eeprom->byte_count);
+ if (le16_to_cpu(eeprom->byte_count) <
+ le16_to_cpu(r.eeprom->byte_count)) {
+ eeprom->byte_count = cpu_to_le16(0);
+ pr_debug("info: EEPROM read length is too big\n");
return -1;
}
+ eeprom->offset = r.eeprom->offset;
+ eeprom->byte_count = r.eeprom->byte_count;
+ if (le16_to_cpu(eeprom->byte_count) > 0)
+ memcpy(&eeprom->value, &r.eeprom->value,
+ le16_to_cpu(r.eeprom->byte_count));
+
+ break;
+ default:
+ return -1;
}
return 0;
}
@@ -775,7 +748,7 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
struct host_cmd_ds_802_11_ibss_status *ibss_coal_resp =
- &(resp->params.ibss_coalescing);
+ &(resp->params.ibss_coalescing);
u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
if (le16_to_cpu(ibss_coal_resp->action) == HostCmd_ACT_GEN_SET)
@@ -915,20 +888,17 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_RECONFIGURE_TX_BUFF:
adapter->tx_buf_size = (u16) le16_to_cpu(resp->params.
tx_buf.buff_size);
- adapter->tx_buf_size = (adapter->tx_buf_size /
- MWIFIEX_SDIO_BLOCK_SIZE) *
- MWIFIEX_SDIO_BLOCK_SIZE;
+ adapter->tx_buf_size = (adapter->tx_buf_size
+ / MWIFIEX_SDIO_BLOCK_SIZE)
+ * MWIFIEX_SDIO_BLOCK_SIZE;
adapter->curr_tx_buf_size = adapter->tx_buf_size;
dev_dbg(adapter->dev,
"cmd: max_tx_buf_size=%d, tx_buf_size=%d\n",
- adapter->max_tx_buf_size, adapter->tx_buf_size);
+ adapter->max_tx_buf_size, adapter->tx_buf_size);
if (adapter->if_ops.update_mp_end_port)
adapter->if_ops.update_mp_end_port(adapter,
- le16_to_cpu(resp->
- params.
- tx_buf.
- mp_end_port));
+ le16_to_cpu(resp->params.tx_buf.mp_end_port));
break;
case HostCmd_CMD_AMSDU_AGGR_CTRL:
ret = mwifiex_ret_amsdu_aggr_ctrl(resp, data_buf);
@@ -956,7 +926,7 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
default:
dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
- resp->command);
+ resp->command);
break;
}
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index d7aa21da84d0..cc531b536a56 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -93,15 +93,15 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
*/
dev_dbg(adapter->dev, "info: previous SSID=%s, SSID len=%u\n",
- priv->prev_ssid.ssid, priv->prev_ssid.ssid_len);
+ priv->prev_ssid.ssid, priv->prev_ssid.ssid_len);
dev_dbg(adapter->dev, "info: current SSID=%s, SSID len=%u\n",
- priv->curr_bss_params.bss_descriptor.ssid.ssid,
- priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
+ priv->curr_bss_params.bss_descriptor.ssid.ssid,
+ priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
memcpy(&priv->prev_ssid,
&priv->curr_bss_params.bss_descriptor.ssid,
- sizeof(struct mwifiex_802_11_ssid));
+ sizeof(struct cfg80211_ssid));
memcpy(priv->prev_bssid,
priv->curr_bss_params.bss_descriptor.mac_address, ETH_ALEN);
@@ -115,9 +115,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
if (adapter->num_cmd_timeout && adapter->curr_cmd)
return;
priv->media_connected = false;
- dev_dbg(adapter->dev, "info: successfully disconnected from"
- " %pM: reason code %d\n", priv->cfg_bssid,
- WLAN_REASON_DEAUTH_LEAVING);
+ dev_dbg(adapter->dev,
+ "info: successfully disconnected from %pM: reason code %d\n",
+ priv->cfg_bssid, WLAN_REASON_DEAUTH_LEAVING);
if (priv->bss_mode == NL80211_IFTYPE_STATION) {
cfg80211_disconnected(priv->netdev, WLAN_REASON_DEAUTH_LEAVING,
NULL, 0, GFP_KERNEL);
@@ -192,8 +192,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
switch (eventcause) {
case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
- dev_err(adapter->dev, "invalid EVENT: DUMMY_HOST_WAKEUP_SIGNAL,"
- " ignoring it\n");
+ dev_err(adapter->dev,
+ "invalid EVENT: DUMMY_HOST_WAKEUP_SIGNAL, ignore it\n");
break;
case EVENT_LINK_SENSED:
dev_dbg(adapter->dev, "event: LINK_SENSED\n");
@@ -235,8 +235,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_PS_AWAKE:
dev_dbg(adapter->dev, "info: EVENT: AWAKE\n");
if (!adapter->pps_uapsd_mode &&
- priv->media_connected &&
- adapter->sleep_period.period) {
+ priv->media_connected && adapter->sleep_period.period) {
adapter->pps_uapsd_mode = true;
dev_dbg(adapter->dev,
"event: PPS/UAPSD mode activated\n");
@@ -244,15 +243,19 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
adapter->tx_lock_flag = false;
if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
if (mwifiex_check_last_packet_indication(priv)) {
- if (!adapter->data_sent) {
- if (!mwifiex_send_null_packet(priv,
- MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET
- |
- MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET))
+ if (adapter->data_sent) {
+ adapter->ps_state = PS_STATE_AWAKE;
+ adapter->pm_wakeup_card_req = false;
+ adapter->pm_wakeup_fw_try = false;
+ break;
+ }
+ if (!mwifiex_send_null_packet
+ (priv,
+ MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET |
+ MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET))
adapter->ps_state =
PS_STATE_SLEEP;
return 0;
- }
}
}
adapter->ps_state = PS_STATE_AWAKE;
@@ -371,12 +374,12 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_AMSDU_AGGR_CTRL:
dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n",
- *(u16 *) adapter->event_body);
+ *(u16 *) adapter->event_body);
adapter->tx_buf_size =
min(adapter->curr_tx_buf_size,
le16_to_cpu(*(__le16 *) adapter->event_body));
dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
- adapter->tx_buf_size);
+ adapter->tx_buf_size);
break;
case EVENT_WEP_ICV_ERR:
@@ -392,7 +395,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
default:
dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
- eventcause);
+ eventcause);
break;
}
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index b0fbf5d4fea0..d7b11defafe0 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -71,7 +71,7 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
/* Wait for completion */
wait_event_interruptible(adapter->cmd_wait_q.wait,
- *(cmd_queued->condition));
+ *(cmd_queued->condition));
if (!*(cmd_queued->condition))
cancel_flag = true;
@@ -192,7 +192,7 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
* first.
*/
int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
- struct mwifiex_802_11_ssid *req_ssid)
+ struct cfg80211_ssid *req_ssid)
{
int ret;
struct mwifiex_adapter *adapter = priv->adapter;
@@ -249,15 +249,25 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
* application retrieval */
priv->assoc_rsp_size = 0;
ret = mwifiex_associate(priv, bss_desc);
+
+ /* If auth type is auto and association fails using open mode,
+ * try to connect using shared mode */
+ if (ret == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG &&
+ priv->sec_info.is_authtype_auto &&
+ priv->sec_info.wep_enabled) {
+ priv->sec_info.authentication_mode =
+ NL80211_AUTHTYPE_SHARED_KEY;
+ ret = mwifiex_associate(priv, bss_desc);
+ }
+
if (bss)
cfg80211_put_bss(bss);
} else {
/* Adhoc mode */
/* If the requested SSID matches current SSID, return */
if (bss_desc && bss_desc->ssid.ssid_len &&
- (!mwifiex_ssid_cmp
- (&priv->curr_bss_params.bss_descriptor.ssid,
- &bss_desc->ssid))) {
+ (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor.
+ ssid, &bss_desc->ssid))) {
kfree(bss_desc);
kfree(beacon_ie);
return 0;
@@ -339,9 +349,8 @@ static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
adapter->hs_cfg.gpio = (u8)hs_cfg->gpio;
if (hs_cfg->gap)
adapter->hs_cfg.gap = (u8)hs_cfg->gap;
- } else if (adapter->hs_cfg.conditions ==
- cpu_to_le32(
- HOST_SLEEP_CFG_CANCEL)) {
+ } else if (adapter->hs_cfg.conditions
+ == cpu_to_le32(HOST_SLEEP_CFG_CANCEL)) {
/* Return failure if no parameters for HS
enable */
status = -1;
@@ -363,7 +372,7 @@ static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
cpu_to_le32(prev_cond);
} else {
adapter->hs_cfg.conditions =
- cpu_to_le32(hs_cfg->conditions);
+ cpu_to_le32(hs_cfg->conditions);
adapter->hs_cfg.gpio = (u8)hs_cfg->gpio;
adapter->hs_cfg.gap = (u8)hs_cfg->gap;
}
@@ -416,11 +425,11 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
adapter->hs_activate_wait_q_woken = false;
- memset(&hscfg, 0, sizeof(struct mwifiex_hs_config_param));
+ memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg));
hscfg.is_invoke_hostcmd = true;
if (mwifiex_set_hs_params(mwifiex_get_priv(adapter,
- MWIFIEX_BSS_ROLE_STA),
+ MWIFIEX_BSS_ROLE_STA),
HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD,
&hscfg)) {
dev_err(adapter->dev, "IOCTL request HS enable failed\n");
@@ -428,7 +437,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
}
wait_event_interruptible(adapter->hs_activate_wait_q,
- adapter->hs_activate_wait_q_woken);
+ adapter->hs_activate_wait_q_woken);
return true;
}
@@ -453,8 +462,7 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
info->bss_mode = priv->bss_mode;
- memcpy(&info->ssid, &bss_desc->ssid,
- sizeof(struct mwifiex_802_11_ssid));
+ memcpy(&info->ssid, &bss_desc->ssid, sizeof(struct cfg80211_ssid));
memcpy(&info->bssid, &bss_desc->mac_address, ETH_ALEN);
@@ -471,7 +479,7 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
info->bcn_nf_last = priv->bcn_nf_last;
- if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED)
+ if (priv->sec_info.wep_enabled)
info->wep_status = true;
else
info->wep_status = false;
@@ -519,30 +527,27 @@ int mwifiex_bss_set_channel(struct mwifiex_private *priv,
adapter->adhoc_start_band = BAND_G | BAND_B;
if (chan->channel) {
if (chan->channel <= MAX_CHANNEL_BAND_BG)
- cfp = mwifiex_get_cfp_by_band_and_channel_from_cfg80211
- (priv, 0, (u16) chan->channel);
+ cfp = mwifiex_get_cfp(priv, 0, (u16) chan->channel, 0);
if (!cfp) {
- cfp = mwifiex_get_cfp_by_band_and_channel_from_cfg80211
- (priv, BAND_A, (u16) chan->channel);
+ cfp = mwifiex_get_cfp(priv, BAND_A,
+ (u16) chan->channel, 0);
if (cfp) {
if (adapter->adhoc_11n_enabled)
adapter->adhoc_start_band = BAND_A
- | BAND_AN;
+ | BAND_AN;
else
adapter->adhoc_start_band = BAND_A;
}
}
} else {
if (chan->freq <= MAX_FREQUENCY_BAND_BG)
- cfp = mwifiex_get_cfp_by_band_and_freq_from_cfg80211(
- priv, 0, chan->freq);
+ cfp = mwifiex_get_cfp(priv, 0, 0, chan->freq);
if (!cfp) {
- cfp = mwifiex_get_cfp_by_band_and_freq_from_cfg80211
- (priv, BAND_A, chan->freq);
+ cfp = mwifiex_get_cfp(priv, BAND_A, 0, chan->freq);
if (cfp) {
if (adapter->adhoc_11n_enabled)
adapter->adhoc_start_band = BAND_A
- | BAND_AN;
+ | BAND_AN;
else
adapter->adhoc_start_band = BAND_A;
}
@@ -578,7 +583,7 @@ static int mwifiex_bss_ioctl_ibss_channel(struct mwifiex_private *priv,
}
return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_RF_CHANNEL,
- action, 0, channel);
+ action, 0, channel);
}
/*
@@ -599,7 +604,7 @@ static int mwifiex_bss_ioctl_ibss_channel(struct mwifiex_private *priv,
* - Start/Join the IBSS
*/
int
-mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, int channel)
+mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, u16 channel)
{
int ret;
struct mwifiex_bss_info bss_info;
@@ -624,7 +629,7 @@ mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, int channel)
goto done;
}
dev_dbg(priv->adapter->dev, "cmd: updating channel from %d to %d\n",
- curr_chan, channel);
+ curr_chan, channel);
if (!bss_info.media_connected) {
ret = 0;
@@ -636,7 +641,7 @@ mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, int channel)
ret = mwifiex_deauthenticate(priv, ssid_bssid.bssid);
ret = mwifiex_bss_ioctl_ibss_channel(priv, HostCmd_ACT_GEN_SET,
- (u16 *) &channel);
+ &channel);
/* Do specific SSID scanning */
if (mwifiex_request_scan(priv, &bss_info.ssid)) {
@@ -646,7 +651,8 @@ mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, int channel)
band = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
chan = __ieee80211_get_channel(priv->wdev->wiphy,
- ieee80211_channel_to_frequency(channel, band));
+ ieee80211_channel_to_frequency(channel,
+ band));
/* Find the BSS we want using available scan results */
bss = cfg80211_get_bss(priv->wdev->wiphy, chan, bss_info.bssid,
@@ -654,7 +660,7 @@ mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, int channel)
WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
if (!bss)
wiphy_warn(priv->wdev->wiphy, "assoc: bss %pM not in scan results\n",
- bss_info.bssid);
+ bss_info.bssid);
ret = mwifiex_bss_start(priv, bss, &bss_info.ssid);
done:
@@ -783,7 +789,9 @@ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
if (!ret) {
if (rate->is_rate_auto)
rate->rate = mwifiex_index_to_data_rate(priv,
- priv->tx_rate, priv->tx_htinfo);
+ priv->tx_rate,
+ priv->tx_htinfo
+ );
else
rate->rate = priv->data_rate;
} else {
@@ -820,16 +828,16 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
if ((dbm < priv->min_tx_power_level) ||
(dbm > priv->max_tx_power_level)) {
dev_err(priv->adapter->dev, "txpower value %d dBm"
- " is out of range (%d dBm-%d dBm)\n",
- dbm, priv->min_tx_power_level,
- priv->max_tx_power_level);
+ " is out of range (%d dBm-%d dBm)\n",
+ dbm, priv->min_tx_power_level,
+ priv->max_tx_power_level);
return -1;
}
}
buf = kzalloc(MWIFIEX_SIZE_OF_CMD_BUFFER, GFP_KERNEL);
if (!buf) {
dev_err(priv->adapter->dev, "%s: failed to alloc cmd buffer\n",
- __func__);
+ __func__);
return -ENOMEM;
}
@@ -837,13 +845,13 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
txp_cfg->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
if (!power_cfg->is_power_auto) {
txp_cfg->mode = cpu_to_le32(1);
- pg_tlv = (struct mwifiex_types_power_group *) (buf +
- sizeof(struct host_cmd_ds_txpwr_cfg));
+ pg_tlv = (struct mwifiex_types_power_group *)
+ (buf + sizeof(struct host_cmd_ds_txpwr_cfg));
pg_tlv->type = TLV_TYPE_POWER_GROUP;
pg_tlv->length = 4 * sizeof(struct mwifiex_power_group);
- pg = (struct mwifiex_power_group *) (buf +
- sizeof(struct host_cmd_ds_txpwr_cfg) +
- sizeof(struct mwifiex_types_power_group));
+ pg = (struct mwifiex_power_group *)
+ (buf + sizeof(struct host_cmd_ds_txpwr_cfg)
+ + sizeof(struct mwifiex_types_power_group));
/* Power group for modulation class HR/DSSS */
pg->first_rate_code = 0x00;
pg->last_rate_code = 0x03;
@@ -906,8 +914,8 @@ int mwifiex_drv_set_power(struct mwifiex_private *priv, u32 *ps_mode)
sub_cmd, BITMAP_STA_PS, NULL);
if ((!ret) && (sub_cmd == DIS_AUTO_PS))
ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_PS_MODE_ENH, GET_PS,
- 0, NULL);
+ HostCmd_CMD_802_11_PS_MODE_ENH,
+ GET_PS, 0, NULL);
return ret;
}
@@ -931,7 +939,7 @@ static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv,
memcpy(priv->wpa_ie, ie_data_ptr, ie_len);
priv->wpa_ie_len = (u8) ie_len;
dev_dbg(priv->adapter->dev, "cmd: Set Wpa_ie_len=%d IE=%#x\n",
- priv->wpa_ie_len, priv->wpa_ie[0]);
+ priv->wpa_ie_len, priv->wpa_ie[0]);
if (priv->wpa_ie[0] == WLAN_EID_WPA) {
priv->sec_info.wpa_enabled = true;
@@ -972,7 +980,7 @@ static int mwifiex_set_wapi_ie(struct mwifiex_private *priv,
memcpy(priv->wapi_ie, ie_data_ptr, ie_len);
priv->wapi_ie_len = ie_len;
dev_dbg(priv->adapter->dev, "cmd: Set wapi_ie_len=%d IE=%#x\n",
- priv->wapi_ie_len, priv->wapi_ie[0]);
+ priv->wapi_ie_len, priv->wapi_ie[0]);
if (priv->wapi_ie[0] == WLAN_EID_BSS_AC_ACCESS_DELAY)
priv->sec_info.wapi_enabled = true;
@@ -998,8 +1006,8 @@ static int mwifiex_sec_ioctl_set_wapi_key(struct mwifiex_private *priv,
{
return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
- encrypt_key);
+ HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
+ encrypt_key);
}
/*
@@ -1020,7 +1028,7 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
wep_key = &priv->wep_key[priv->wep_key_curr_index];
index = encrypt_key->key_index;
if (encrypt_key->key_disable) {
- priv->sec_info.wep_status = MWIFIEX_802_11_WEP_DISABLED;
+ priv->sec_info.wep_enabled = 0;
} else if (!encrypt_key->key_len) {
/* Copy the required key as the current key */
wep_key = &priv->wep_key[index];
@@ -1030,7 +1038,7 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
return -1;
}
priv->wep_key_curr_index = (u16) index;
- priv->sec_info.wep_status = MWIFIEX_802_11_WEP_ENABLED;
+ priv->sec_info.wep_enabled = 1;
} else {
wep_key = &priv->wep_key[index];
memset(wep_key, 0, sizeof(struct mwifiex_wep_key));
@@ -1040,7 +1048,7 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
encrypt_key->key_len);
wep_key->key_index = index;
wep_key->key_length = encrypt_key->key_len;
- priv->sec_info.wep_status = MWIFIEX_802_11_WEP_ENABLED;
+ priv->sec_info.wep_enabled = 1;
}
if (wep_key->key_length) {
/* Send request to firmware */
@@ -1050,7 +1058,7 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
if (ret)
return ret;
}
- if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED)
+ if (priv->sec_info.wep_enabled)
priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
else
priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
@@ -1093,9 +1101,9 @@ static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv,
/* Send the key as PTK to firmware */
encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST;
ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
- encrypt_key);
+ HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET,
+ KEY_INFO_ENABLED, encrypt_key);
if (ret)
return ret;
@@ -1120,14 +1128,14 @@ static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv,
if (remove_key)
ret = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET, !(KEY_INFO_ENABLED),
- encrypt_key);
+ HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET,
+ !KEY_INFO_ENABLED, encrypt_key);
else
ret = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
- encrypt_key);
+ HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET,
+ KEY_INFO_ENABLED, encrypt_key);
return ret;
}
@@ -1246,7 +1254,7 @@ mwifiex_get_ver_ext(struct mwifiex_private *priv)
memset(&ver_ext, 0, sizeof(struct host_cmd_ds_version_ext));
if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_VERSION_EXT,
- HostCmd_ACT_GEN_GET, 0, &ver_ext))
+ HostCmd_ACT_GEN_GET, 0, &ver_ext))
return -1;
return 0;
@@ -1263,7 +1271,7 @@ mwifiex_get_stats_info(struct mwifiex_private *priv,
struct mwifiex_ds_get_stats *log)
{
return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_GET_LOG,
- HostCmd_ACT_GEN_GET, 0, log);
+ HostCmd_ACT_GEN_GET, 0, log);
}
/*
@@ -1403,9 +1411,9 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
}
pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr;
/* Test to see if it is a WPA IE, if not, then it is a gen IE */
- if (((pvendor_ie->element_id == WLAN_EID_WPA)
- && (!memcmp(pvendor_ie->oui, wpa_oui, sizeof(wpa_oui))))
- || (pvendor_ie->element_id == WLAN_EID_RSN)) {
+ if (((pvendor_ie->element_id == WLAN_EID_WPA) &&
+ (!memcmp(pvendor_ie->oui, wpa_oui, sizeof(wpa_oui)))) ||
+ (pvendor_ie->element_id == WLAN_EID_RSN)) {
/* IE is a WPA/WPA2 IE so call set_wpa function */
ret = mwifiex_set_wpa_ie_helper(priv, ie_data_ptr, ie_len);
@@ -1428,9 +1436,8 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
* wps session flag
*/
pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr;
- if ((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC)
- && (!memcmp(pvendor_ie->oui, wps_oui,
- sizeof(wps_oui)))) {
+ if ((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) &&
+ (!memcmp(pvendor_ie->oui, wps_oui, sizeof(wps_oui)))) {
priv->wps.session_enable = true;
dev_dbg(priv->adapter->dev,
"info: WPS Session Enabled.\n");
@@ -1439,7 +1446,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
/* Append the passed data to the end of the
genIeBuffer */
memcpy(priv->gen_ie_buf + priv->gen_ie_buf_len, ie_data_ptr,
- ie_len);
+ ie_len);
/* Increment the stored buffer length by the
size passed */
priv->gen_ie_buf_len += ie_len;
@@ -1483,7 +1490,7 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv,
return -1;
} else {
memcpy(adapter->arp_filter, gen_ie->ie_data,
- gen_ie->len);
+ gen_ie->len);
adapter->arp_filter_size = gen_ie->len;
}
break;
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 5e1ef7e5da4f..750b695aca12 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -43,7 +43,9 @@ int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
{
int ret;
struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
- struct mwifiex_private *priv = adapter->priv[rx_info->bss_index];
+ struct mwifiex_private *priv =
+ mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
+ rx_info->bss_type);
struct rx_packet_hdr *rx_pkt_hdr;
struct rxpd *local_rx_pd;
int hdr_chop;
@@ -124,7 +126,9 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
struct rx_packet_hdr *rx_pkt_hdr;
u8 ta[ETH_ALEN];
u16 rx_pkt_type;
- struct mwifiex_private *priv = adapter->priv[rx_info->bss_index];
+ struct mwifiex_private *priv =
+ mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
+ rx_info->bss_type);
if (!priv)
return -1;
@@ -155,7 +159,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
skb_trim(skb, local_rx_pd->rx_pkt_length);
ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
- priv->wdev->iftype, 0, false);
+ priv->wdev->iftype, 0, false);
while (!skb_queue_empty(&list)) {
rx_skb = __skb_dequeue(&list);
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index d97facd70e88..7af534feb420 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -50,8 +50,7 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
u8 pad;
if (!skb->len) {
- dev_err(adapter->dev, "Tx: bad packet length: %d\n",
- skb->len);
+ dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
tx_info->status_code = -1;
return skb->data;
}
@@ -60,19 +59,20 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;
BUG_ON(skb_headroom(skb) < (sizeof(*local_tx_pd) + INTF_HEADER_LEN
- + pad));
+ + pad));
skb_push(skb, sizeof(*local_tx_pd) + pad);
local_tx_pd = (struct txpd *) skb->data;
memset(local_tx_pd, 0, sizeof(struct txpd));
local_tx_pd->bss_num = priv->bss_num;
local_tx_pd->bss_type = priv->bss_type;
- local_tx_pd->tx_pkt_length = cpu_to_le16((u16) (skb->len -
- (sizeof(struct txpd) + pad)));
+ local_tx_pd->tx_pkt_length = cpu_to_le16((u16)(skb->len -
+ (sizeof(struct txpd)
+ + pad)));
local_tx_pd->priority = (u8) skb->priority;
local_tx_pd->pkt_delay_2ms =
- mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
+ mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
if (local_tx_pd->priority <
ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
@@ -82,7 +82,7 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
*/
local_tx_pd->tx_control =
cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[local_tx_pd->
- priority]);
+ priority]);
if (adapter->pps_uapsd_mode) {
if (mwifiex_check_last_packet_indication(priv)) {
@@ -136,7 +136,8 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
return -1;
tx_info = MWIFIEX_SKB_TXCB(skb);
- tx_info->bss_index = priv->bss_index;
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
skb_reserve(skb, sizeof(struct txpd) + INTF_HEADER_LEN);
skb_push(skb, sizeof(struct txpd));
@@ -159,13 +160,13 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
case -1:
dev_kfree_skb_any(skb);
dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
- __func__, ret);
+ __func__, ret);
adapter->dbg.num_tx_host_to_card_failure++;
break;
case 0:
dev_kfree_skb_any(skb);
dev_dbg(adapter->dev, "data: %s: host_to_card succeeded\n",
- __func__);
+ __func__);
adapter->tx_lock_flag = true;
break;
case -EINPROGRESS:
@@ -191,8 +192,8 @@ mwifiex_check_last_packet_indication(struct mwifiex_private *priv)
if (mwifiex_wmm_lists_empty(adapter))
ret = true;
- if (ret && !adapter->cmd_sent && !adapter->curr_cmd
- && !is_command_pending(adapter)) {
+ if (ret && !adapter->cmd_sent && !adapter->curr_cmd &&
+ !is_command_pending(adapter)) {
adapter->delay_null_pkt = false;
ret = true;
} else {
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index d9274a1b77ac..d2af8cb98541 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -48,7 +48,8 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
if (!priv)
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
- rx_info->bss_index = priv->bss_index;
+ rx_info->bss_num = priv->bss_num;
+ rx_info->bss_type = priv->bss_type;
return mwifiex_process_sta_rx_packet(adapter, skb);
}
@@ -84,8 +85,7 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
switch (ret) {
case -EBUSY:
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
- (adapter->pps_uapsd_mode) &&
- (adapter->tx_lock_flag)) {
+ (adapter->pps_uapsd_mode) && (adapter->tx_lock_flag)) {
priv->adapter->tx_lock_flag = false;
if (local_tx_pd)
local_tx_pd->flags = 0;
@@ -95,7 +95,7 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
case -1:
adapter->data_sent = false;
dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
- ret);
+ ret);
adapter->dbg.num_tx_host_to_card_failure++;
mwifiex_write_data_complete(adapter, skb, ret);
break;
@@ -130,7 +130,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
return 0;
tx_info = MWIFIEX_SKB_TXCB(skb);
- priv = mwifiex_bss_index_to_priv(adapter, tx_info->bss_index);
+ priv = mwifiex_get_priv_by_id(adapter, tx_info->bss_num,
+ tx_info->bss_type);
if (!priv)
goto done;
@@ -149,11 +150,11 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
tpriv = adapter->priv[i];
- if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA)
- && (tpriv->media_connected)) {
+ if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA) &&
+ (tpriv->media_connected)) {
if (netif_queue_stopped(tpriv->netdev))
mwifiex_wake_up_net_dev_queue(tpriv->netdev,
- adapter);
+ adapter);
}
}
done:
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 06976f517f66..6b399976d6c8 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -93,10 +93,10 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
sizeof(priv->wmm.packets_out));
info->max_tx_buf_size = (u32) adapter->max_tx_buf_size;
info->tx_buf_size = (u32) adapter->tx_buf_size;
- info->rx_tbl_num = mwifiex_get_rx_reorder_tbl(
- priv, info->rx_tbl);
- info->tx_tbl_num = mwifiex_get_tx_ba_stream_tbl(
- priv, info->tx_tbl);
+ info->rx_tbl_num = mwifiex_get_rx_reorder_tbl(priv,
+ info->rx_tbl);
+ info->tx_tbl_num = mwifiex_get_tx_ba_stream_tbl(priv,
+ info->tx_tbl);
info->ps_mode = adapter->ps_mode;
info->ps_state = adapter->ps_state;
info->is_deep_sleep = adapter->is_deep_sleep;
@@ -105,19 +105,19 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
info->is_hs_configured = adapter->is_hs_configured;
info->hs_activated = adapter->hs_activated;
info->num_cmd_host_to_card_failure
- = adapter->dbg.num_cmd_host_to_card_failure;
+ = adapter->dbg.num_cmd_host_to_card_failure;
info->num_cmd_sleep_cfm_host_to_card_failure
= adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure;
info->num_tx_host_to_card_failure
- = adapter->dbg.num_tx_host_to_card_failure;
+ = adapter->dbg.num_tx_host_to_card_failure;
info->num_event_deauth = adapter->dbg.num_event_deauth;
info->num_event_disassoc = adapter->dbg.num_event_disassoc;
info->num_event_link_lost = adapter->dbg.num_event_link_lost;
info->num_cmd_deauth = adapter->dbg.num_cmd_deauth;
info->num_cmd_assoc_success =
- adapter->dbg.num_cmd_assoc_success;
+ adapter->dbg.num_cmd_assoc_success;
info->num_cmd_assoc_failure =
- adapter->dbg.num_cmd_assoc_failure;
+ adapter->dbg.num_cmd_assoc_failure;
info->num_tx_timeout = adapter->dbg.num_tx_timeout;
info->num_cmd_timeout = adapter->dbg.num_cmd_timeout;
info->timeout_cmd_id = adapter->dbg.timeout_cmd_id;
@@ -159,7 +159,8 @@ int mwifiex_recv_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb)
return -1;
rx_info = MWIFIEX_SKB_RXCB(skb);
- priv = mwifiex_bss_index_to_priv(adapter, rx_info->bss_index);
+ priv = mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
+ rx_info->bss_type);
if (!priv)
return -1;
@@ -190,7 +191,7 @@ int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
{
atomic_dec(&adapter->cmd_pending);
dev_dbg(adapter->dev, "cmd completed: status=%d\n",
- adapter->cmd_wait_q.status);
+ adapter->cmd_wait_q.status);
*(cmd_node->condition) = true;
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 6c239c3c8249..5a7316c6f125 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -87,15 +87,15 @@ mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
const char *ac_str[] = { "BK", "BE", "VI", "VO" };
pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
- "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
- ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
- & MWIFIEX_ACI) >> 5]],
- (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
- (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
- ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
- ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
- (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
- le16_to_cpu(ac_param->tx_op_limit));
+ "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
+ ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
+ & MWIFIEX_ACI) >> 5]],
+ (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
+ (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
+ ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
+ ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
+ (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
+ le16_to_cpu(ac_param->tx_op_limit));
}
/*
@@ -112,7 +112,7 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
if (!ra_list) {
dev_err(adapter->dev, "%s: failed to alloc ra_list\n",
- __func__);
+ __func__);
return NULL;
}
INIT_LIST_HEAD(&ra_list->list);
@@ -154,7 +154,7 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
ra_list, ra_list->is_11n_enabled);
list_add_tail(&ra_list->list,
- &priv->wmm.tid_tbl_ptr[i].ra_list);
+ &priv->wmm.tid_tbl_ptr[i].ra_list);
if (!priv->wmm.tid_tbl_ptr[i].ra_list_curr)
priv->wmm.tid_tbl_ptr[i].ra_list_curr = ra_list;
@@ -217,22 +217,19 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
wmm_ie->reserved);
for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
- cw_min = (1 << (wmm_ie->ac_params[num_ac].ecw_bitmap &
- MWIFIEX_ECW_MIN)) - 1;
- avg_back_off = (cw_min >> 1) +
- (wmm_ie->ac_params[num_ac].aci_aifsn_bitmap &
- MWIFIEX_AIFSN);
-
- ac_idx = wmm_aci_to_qidx_map[(wmm_ie->ac_params[num_ac].
- aci_aifsn_bitmap &
- MWIFIEX_ACI) >> 5];
+ u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
+ u8 aci_aifsn = wmm_ie->ac_params[num_ac].aci_aifsn_bitmap;
+ cw_min = (1 << (ecw & MWIFIEX_ECW_MIN)) - 1;
+ avg_back_off = (cw_min >> 1) + (aci_aifsn & MWIFIEX_AIFSN);
+
+ ac_idx = wmm_aci_to_qidx_map[(aci_aifsn & MWIFIEX_ACI) >> 5];
priv->wmm.queue_priority[ac_idx] = ac_idx;
tmp[ac_idx] = avg_back_off;
- dev_dbg(priv->adapter->dev, "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
- (1 << ((wmm_ie->ac_params[num_ac].ecw_bitmap &
- MWIFIEX_ECW_MAX) >> 4)) - 1,
- cw_min, avg_back_off);
+ dev_dbg(priv->adapter->dev,
+ "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
+ (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
+ cw_min, avg_back_off);
mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
}
@@ -312,13 +309,14 @@ mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
/* WMM is not enabled, default priorities */
for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++)
priv->wmm.ac_down_graded_vals[ac_val] =
- (enum mwifiex_wmm_ac_e) ac_val;
+ (enum mwifiex_wmm_ac_e) ac_val;
} else {
for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) {
priv->wmm.ac_down_graded_vals[ac_val]
= mwifiex_wmm_eval_downgrade_ac(priv,
(enum mwifiex_wmm_ac_e) ac_val);
- dev_dbg(priv->adapter->dev, "info: WMM: AC PRIO %d maps to %d\n",
+ dev_dbg(priv->adapter->dev,
+ "info: WMM: AC PRIO %d maps to %d\n",
ac_val, priv->wmm.ac_down_graded_vals[ac_val]);
}
}
@@ -394,13 +392,13 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
}
priv->aggr_prio_tbl[6].amsdu
- = priv->aggr_prio_tbl[6].ampdu_ap
- = priv->aggr_prio_tbl[6].ampdu_user
- = BA_STREAM_NOT_ALLOWED;
+ = priv->aggr_prio_tbl[6].ampdu_ap
+ = priv->aggr_prio_tbl[6].ampdu_user
+ = BA_STREAM_NOT_ALLOWED;
priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
- = priv->aggr_prio_tbl[7].ampdu_user
- = BA_STREAM_NOT_ALLOWED;
+ = priv->aggr_prio_tbl[7].ampdu_user
+ = BA_STREAM_NOT_ALLOWED;
priv->add_ba_param.timeout = MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT;
priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
@@ -472,7 +470,7 @@ static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
for (i = 0; i < MAX_NUM_TID; i++)
mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i].
- ra_list);
+ ra_list);
atomic_set(&priv->wmm.tx_pkts_queued, 0);
atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
@@ -488,9 +486,10 @@ static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
for (i = 0; i < MAX_NUM_TID; ++i) {
dev_dbg(priv->adapter->dev,
- "info: ra_list: freeing buf for tid %d\n", i);
+ "info: ra_list: freeing buf for tid %d\n", i);
list_for_each_entry_safe(ra_list, tmp_node,
- &priv->wmm.tid_tbl_ptr[i].ra_list, list) {
+ &priv->wmm.tid_tbl_ptr[i].ra_list,
+ list) {
list_del(&ra_list->list);
kfree(ra_list);
}
@@ -599,11 +598,10 @@ mwifiex_is_ralist_valid(struct mwifiex_private *priv,
* is queued at the list tail.
*/
void
-mwifiex_wmm_add_buf_txqueue(struct mwifiex_adapter *adapter,
+mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
struct sk_buff *skb)
{
- struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
- struct mwifiex_private *priv = adapter->priv[tx_info->bss_index];
+ struct mwifiex_adapter *adapter = priv->adapter;
u32 tid;
struct mwifiex_ra_list_tbl *ra_list;
u8 ra[ETH_ALEN], tid_down;
@@ -653,7 +651,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_adapter *adapter,
if (atomic_read(&priv->wmm.highest_queued_prio) <
tos_to_tid_inv[tid_down])
atomic_set(&priv->wmm.highest_queued_prio,
- tos_to_tid_inv[tid_down]);
+ tos_to_tid_inv[tid_down]);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}
@@ -682,7 +680,7 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
struct mwifiex_wmm_ac_status *ac_status;
dev_dbg(priv->adapter->dev, "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
- resp_len);
+ resp_len);
while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
tlv_hdr = (struct mwifiex_ie_types_data *) curr;
@@ -696,15 +694,15 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
dev_dbg(priv->adapter->dev,
"info: CMD_RESP: WMM_GET_STATUS:"
" QSTATUS TLV: %d, %d, %d\n",
- tlv_wmm_qstatus->queue_index,
- tlv_wmm_qstatus->flow_required,
- tlv_wmm_qstatus->disabled);
+ tlv_wmm_qstatus->queue_index,
+ tlv_wmm_qstatus->flow_required,
+ tlv_wmm_qstatus->disabled);
ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
queue_index];
ac_status->disabled = tlv_wmm_qstatus->disabled;
ac_status->flow_required =
- tlv_wmm_qstatus->flow_required;
+ tlv_wmm_qstatus->flow_required;
ac_status->flow_created = tlv_wmm_qstatus->flow_created;
break;
@@ -773,29 +771,27 @@ mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
if (!wmm_ie)
return 0;
- dev_dbg(priv->adapter->dev, "info: WMM: process assoc req:"
- "bss->wmmIe=0x%x\n",
- wmm_ie->vend_hdr.element_id);
+ dev_dbg(priv->adapter->dev,
+ "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
+ wmm_ie->vend_hdr.element_id);
- if ((priv->wmm_required
- || (ht_cap && (priv->adapter->config_bands & BAND_GN
- || priv->adapter->config_bands & BAND_AN))
- )
- && wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
+ if ((priv->wmm_required ||
+ (ht_cap && (priv->adapter->config_bands & BAND_GN ||
+ priv->adapter->config_bands & BAND_AN))) &&
+ wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf;
wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]);
wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]);
memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2],
- le16_to_cpu(wmm_tlv->header.len));
+ le16_to_cpu(wmm_tlv->header.len));
if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD)
memcpy((u8 *) (wmm_tlv->wmm_ie
- + le16_to_cpu(wmm_tlv->header.len)
- - sizeof(priv->wmm_qosinfo)),
- &priv->wmm_qosinfo,
- sizeof(priv->wmm_qosinfo));
+ + le16_to_cpu(wmm_tlv->header.len)
+ - sizeof(priv->wmm_qosinfo)),
+ &priv->wmm_qosinfo, sizeof(priv->wmm_qosinfo));
ret_len = sizeof(wmm_tlv->header)
- + le16_to_cpu(wmm_tlv->header.len);
+ + le16_to_cpu(wmm_tlv->header.len);
*assoc_buf += ret_len;
}
@@ -814,7 +810,7 @@ mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
*/
u8
mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
- const struct sk_buff *skb)
+ const struct sk_buff *skb)
{
u8 ret_val;
struct timeval out_tstamp, in_tstamp;
@@ -851,17 +847,18 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
struct mwifiex_ra_list_tbl *ptr, *head;
struct mwifiex_bss_prio_node *bssprio_node, *bssprio_head;
struct mwifiex_tid_tbl *tid_ptr;
+ atomic_t *hqp;
int is_list_empty;
unsigned long flags;
int i, j;
for (j = adapter->priv_num - 1; j >= 0; --j) {
spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock,
- flags);
+ flags);
is_list_empty = list_empty(&adapter->bss_prio_tbl[j]
- .bss_prio_head);
+ .bss_prio_head);
spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
- flags);
+ flags);
if (is_list_empty)
continue;
@@ -880,12 +877,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
}
do {
- atomic_t *hqp;
- spinlock_t *lock;
-
priv_tmp = bssprio_node->priv;
hqp = &priv_tmp->wmm.highest_queued_prio;
- lock = &priv_tmp->wmm.ra_list_spinlock;
for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {
@@ -924,16 +917,10 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
do {
is_list_empty =
skb_queue_empty(&ptr->skb_head);
- if (!is_list_empty) {
- spin_lock_irqsave(lock, flags);
- if (atomic_read(hqp) > i)
- atomic_set(hqp, i);
- spin_unlock_irqrestore(lock,
- flags);
- *priv = priv_tmp;
- *tid = tos_to_tid[i];
- return ptr;
- }
+
+ if (!is_list_empty)
+ goto found;
+
/* Get next ra */
ptr = list_first_entry(&ptr->list,
struct mwifiex_ra_list_tbl,
@@ -970,6 +957,17 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
} while (bssprio_node != bssprio_head);
}
return NULL;
+
+found:
+ spin_lock_irqsave(&priv_tmp->wmm.ra_list_spinlock, flags);
+ if (atomic_read(hqp) > i)
+ atomic_set(hqp, i);
+ spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags);
+
+ *priv = priv_tmp;
+ *tid = tos_to_tid[i];
+
+ return ptr;
}
/*
@@ -1209,25 +1207,24 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
return 0;
}
- if (!ptr->is_11n_enabled || mwifiex_is_ba_stream_setup(priv, ptr, tid)
- || ((priv->sec_info.wpa_enabled
- || priv->sec_info.wpa2_enabled) && !priv->wpa_is_gtk_set)
- ) {
+ if (!ptr->is_11n_enabled ||
+ mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
+ ((priv->sec_info.wpa_enabled ||
+ priv->sec_info.wpa2_enabled) &&
+ !priv->wpa_is_gtk_set)) {
mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
/* ra_list_spinlock has been freed in
mwifiex_send_single_packet() */
} else {
if (mwifiex_is_ampdu_allowed(priv, tid)) {
if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
- mwifiex_11n_create_tx_ba_stream_tbl(priv,
- ptr->ra, tid,
- BA_STREAM_SETUP_INPROGRESS);
+ mwifiex_create_ba_tbl(priv, ptr->ra, tid,
+ BA_SETUP_INPROGRESS);
mwifiex_send_addba(priv, tid, ptr->ra);
} else if (mwifiex_find_stream_to_delete
(priv, tid, &tid_del, ra)) {
- mwifiex_11n_create_tx_ba_stream_tbl(priv,
- ptr->ra, tid,
- BA_STREAM_SETUP_INPROGRESS);
+ mwifiex_create_ba_tbl(priv, ptr->ra, tid,
+ BA_SETUP_INPROGRESS);
mwifiex_send_delba(priv, tid_del, ra, 1);
}
}
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index fcea1f68792f..ec839952d2e7 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -80,8 +80,8 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
return true;
}
-void mwifiex_wmm_add_buf_txqueue(struct mwifiex_adapter *adapter,
- struct sk_buff *skb);
+void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
+ struct sk_buff *skb);
void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index dd5aeaff44ba..b48674b577e6 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -402,6 +402,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
#define MWL8K_CMD_SET_MAC_ADDR 0x0202 /* per-vif */
#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
#define MWL8K_CMD_GET_WATCHDOG_BITMAP 0x0205
+#define MWL8K_CMD_DEL_MAC_ADDR 0x0206 /* per-vif */
#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
#define MWL8K_CMD_UPDATE_ENCRYPTION 0x1122 /* per-vif */
@@ -1330,7 +1331,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
wh->addr1);
if (mwl8k_vif != NULL &&
- mwl8k_vif->is_hw_crypto_enabled == true) {
+ mwl8k_vif->is_hw_crypto_enabled) {
/*
* When MMIC ERROR is encountered
* by the firmware, payload is
@@ -1993,8 +1994,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
*/
if (txq->len >= MWL8K_TX_DESCS - 2) {
- if (mgmtframe == false ||
- txq->len == MWL8K_TX_DESCS) {
+ if (!mgmtframe || txq->len == MWL8K_TX_DESCS) {
if (start_ba_session) {
spin_lock(&priv->stream_lock);
mwl8k_remove_stream(hw, stream);
@@ -3430,10 +3430,7 @@ static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
return rc;
}
-/*
- * CMD_SET_MAC_ADDR.
- */
-struct mwl8k_cmd_set_mac_addr {
+struct mwl8k_cmd_update_mac_addr {
struct mwl8k_cmd_pkt header;
union {
struct {
@@ -3449,12 +3446,12 @@ struct mwl8k_cmd_set_mac_addr {
#define MWL8K_MAC_TYPE_PRIMARY_AP 2
#define MWL8K_MAC_TYPE_SECONDARY_AP 3
-static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u8 *mac)
+static int mwl8k_cmd_update_mac_addr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *mac, bool set)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
- struct mwl8k_cmd_set_mac_addr *cmd;
+ struct mwl8k_cmd_update_mac_addr *cmd;
int mac_type;
int rc;
@@ -3475,7 +3472,11 @@ static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
+ if (set)
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
+ else
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_DEL_MAC_ADDR);
+
cmd->header.length = cpu_to_le16(sizeof(*cmd));
if (priv->ap_fw) {
cmd->mbss.mac_type = cpu_to_le16(mac_type);
@@ -3491,6 +3492,24 @@ static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
}
/*
+ * MWL8K_CMD_SET_MAC_ADDR.
+ */
+static inline int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *mac)
+{
+ return mwl8k_cmd_update_mac_addr(hw, vif, mac, true);
+}
+
+/*
+ * MWL8K_CMD_DEL_MAC_ADDR.
+ */
+static inline int mwl8k_cmd_del_mac_addr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *mac)
+{
+ return mwl8k_cmd_update_mac_addr(hw, vif, mac, false);
+}
+
+/*
* CMD_SET_RATEADAPT_MODE.
*/
struct mwl8k_cmd_set_rate_adapt_mode {
@@ -4093,7 +4112,7 @@ static int mwl8k_set_key(struct ieee80211_hw *hw,
return -EOPNOTSUPP;
if (sta == NULL)
- addr = hw->wiphy->perm_addr;
+ addr = vif->addr;
else
addr = sta->addr;
@@ -4542,7 +4561,7 @@ static void mwl8k_remove_interface(struct ieee80211_hw *hw,
if (priv->ap_fw)
mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr);
- mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00");
+ mwl8k_cmd_del_mac_addr(hw, vif, vif->addr);
mwl8k_remove_vif(priv, mwl8k_vif);
}
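
The mwl8k change above folds CMD_SET_MAC_ADDR and the new CMD_DEL_MAC_ADDR into a single worker that takes a set/delete flag, with two inline wrappers keeping the call sites readable. A minimal sketch of that shape, assuming nothing about mwl8k internals; every name below is illustrative, not a driver symbol.

#include <stdbool.h>

/* Stand-in opcodes; the real values live in the MWL8K_CMD_* defines above. */
#define CMD_SET_ADDR 0x0202
#define CMD_DEL_ADDR 0x0206

/* One worker owns the shared command setup; the flag only picks the opcode. */
static int update_addr(const unsigned char *mac, bool set)
{
	unsigned int code = set ? CMD_SET_ADDR : CMD_DEL_ADDR;

	/* A real driver would allocate, fill and post the firmware command here. */
	(void)mac;
	(void)code;
	return 0;
}

static inline int set_addr(const unsigned char *mac)
{
	return update_addr(mac, true);
}

static inline int del_addr(const unsigned char *mac)
{
	return update_addr(mac, false);
}

int main(void)
{
	unsigned char mac[6] = { 0 };

	return set_addr(mac) || del_addr(mac);
}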
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 9fb77d0319f5..dd6c64ac406e 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -941,11 +941,9 @@ void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw)
/* Add desc and skb to rx queue */
rx_data = kzalloc(sizeof(*rx_data), GFP_ATOMIC);
- if (!rx_data) {
- printk(KERN_WARNING "%s: Can't allocate RX packet\n",
- dev->name);
+ if (!rx_data)
goto drop;
- }
+
rx_data->desc = desc;
rx_data->skb = skb;
list_add_tail(&rx_data->list, &priv->rx_list);
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index ae8ce56670b6..f634d4582bfe 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -1754,11 +1754,6 @@ static struct usb_driver orinoco_driver = {
.id_table = ezusb_table,
};
-/* Can't be declared "const" or the whole __initdata section will
- * become const */
-static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
- " (Manuel Estrada Sainz)";
-
module_usb_driver(orinoco_driver);
MODULE_AUTHOR("Manuel Estrada Sainz");
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index af2ca1a9c7d3..ee8af1f047c8 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -227,6 +227,9 @@ static int p54_add_interface(struct ieee80211_hw *dev,
struct ieee80211_vif *vif)
{
struct p54_common *priv = dev->priv;
+ int err;
+
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
mutex_lock(&priv->conf_mutex);
if (priv->mode != NL80211_IFTYPE_MONITOR) {
@@ -249,9 +252,9 @@ static int p54_add_interface(struct ieee80211_hw *dev,
}
memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
- p54_setup_mac(priv);
+ err = p54_setup_mac(priv);
mutex_unlock(&priv->conf_mutex);
- return 0;
+ return err;
}
static void p54_remove_interface(struct ieee80211_hw *dev,
@@ -734,7 +737,6 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_BEACON_FILTER |
IEEE80211_HW_REPORTS_TX_ACK_STATUS;
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index b1f51a215792..45df728183fd 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -624,36 +624,39 @@ static void __devexit p54p_remove(struct pci_dev *pdev)
}
#ifdef CONFIG_PM
-static int p54p_suspend(struct pci_dev *pdev, pm_message_t state)
+static int p54p_suspend(struct device *device)
{
- struct ieee80211_hw *dev = pci_get_drvdata(pdev);
- struct p54p_priv *priv = dev->priv;
-
- if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) {
- ieee80211_stop_queues(dev);
- p54p_stop(dev);
- }
+ struct pci_dev *pdev = to_pci_dev(device);
pci_save_state(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ pci_set_power_state(pdev, PCI_D3hot);
+ pci_disable_device(pdev);
return 0;
}
-static int p54p_resume(struct pci_dev *pdev)
+static int p54p_resume(struct device *device)
{
- struct ieee80211_hw *dev = pci_get_drvdata(pdev);
- struct p54p_priv *priv = dev->priv;
+ struct pci_dev *pdev = to_pci_dev(device);
+ int err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
+ err = pci_reenable_device(pdev);
+ if (err)
+ return err;
+ return pci_set_power_state(pdev, PCI_D0);
+}
- if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) {
- p54p_open(dev);
- ieee80211_wake_queues(dev);
- }
+static const struct dev_pm_ops p54pci_pm_ops = {
+ .suspend = p54p_suspend,
+ .resume = p54p_resume,
+ .freeze = p54p_suspend,
+ .thaw = p54p_resume,
+ .poweroff = p54p_suspend,
+ .restore = p54p_resume,
+};
- return 0;
-}
+#define P54P_PM_OPS (&p54pci_pm_ops)
+#else
+#define P54P_PM_OPS (NULL)
#endif /* CONFIG_PM */
static struct pci_driver p54p_driver = {
@@ -661,10 +664,7 @@ static struct pci_driver p54p_driver = {
.id_table = p54p_table,
.probe = p54p_probe,
.remove = __devexit_p(p54p_remove),
-#ifdef CONFIG_PM
- .suspend = p54p_suspend,
- .resume = p54p_resume,
-#endif /* CONFIG_PM */
+ .driver.pm = P54P_PM_OPS,
};
static int __init p54p_init(void)
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 7faed62c6378..f7929906d437 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -618,19 +618,19 @@ static int __devinit p54spi_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&priv->spi->dev, "spi_setup failed");
- goto err_free_common;
+ goto err_free;
}
ret = gpio_request(p54spi_gpio_power, "p54spi power");
if (ret < 0) {
dev_err(&priv->spi->dev, "power GPIO request failed: %d", ret);
- goto err_free_common;
+ goto err_free;
}
ret = gpio_request(p54spi_gpio_irq, "p54spi irq");
if (ret < 0) {
dev_err(&priv->spi->dev, "irq GPIO request failed: %d", ret);
- goto err_free_common;
+ goto err_free_gpio_power;
}
gpio_direction_output(p54spi_gpio_power, 0);
@@ -641,7 +641,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
priv->spi);
if (ret < 0) {
dev_err(&priv->spi->dev, "request_irq() failed");
- goto err_free_common;
+ goto err_free_gpio_irq;
}
irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING);
@@ -673,6 +673,12 @@ static int __devinit p54spi_probe(struct spi_device *spi)
return 0;
err_free_common:
+ free_irq(gpio_to_irq(p54spi_gpio_irq), spi);
+err_free_gpio_irq:
+ gpio_free(p54spi_gpio_irq);
+err_free_gpio_power:
+ gpio_free(p54spi_gpio_power);
+err_free:
p54_free_common(priv->hw);
return ret;
}
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 42b97bc38477..a08a6f0e4dd1 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -690,7 +690,7 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
*flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;
- if (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)
+ if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)
*flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL;
if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index a5224f6160e4..851fa10241e1 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -192,11 +192,9 @@ islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
err = -ENOMEM;
p = buf.mem = kmalloc(frag_len, GFP_KERNEL);
- if (!buf.mem) {
- printk(KERN_DEBUG "%s: cannot allocate mgmt frame\n",
- ndev->name);
+ if (!buf.mem)
goto error;
- }
+
buf.size = frag_len;
/* create the header directly in the fragment data area */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index a330c69583d6..d66e2980bc27 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -518,7 +518,7 @@ struct rndis_wlan_private {
__le32 current_command_oid;
/* encryption stuff */
- int encr_tx_key_index;
+ u8 encr_tx_key_index;
struct rndis_wlan_encr_key encr_keys[RNDIS_WLAN_NUM_KEYS];
int wpa_version;
@@ -634,7 +634,7 @@ static u32 get_bcm4320_power_dbm(struct rndis_wlan_private *priv)
}
}
-static bool is_wpa_key(struct rndis_wlan_private *priv, int idx)
+static bool is_wpa_key(struct rndis_wlan_private *priv, u8 idx)
{
int cipher = priv->encr_keys[idx].cipher;
@@ -1350,7 +1350,7 @@ static int set_channel(struct usbnet *usbdev, int channel)
}
static struct ieee80211_channel *get_current_channel(struct usbnet *usbdev,
- u16 *beacon_interval)
+ u32 *beacon_period)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ieee80211_channel *channel;
@@ -1370,14 +1370,14 @@ static struct ieee80211_channel *get_current_channel(struct usbnet *usbdev,
if (!channel)
return NULL;
- if (beacon_interval)
- *beacon_interval = le16_to_cpu(config.beacon_period);
+ if (beacon_period)
+ *beacon_period = le32_to_cpu(config.beacon_period);
return channel;
}
/* index must be 0 - N, as per NDIS */
static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
- int index)
+ u8 index)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ndis_80211_wep_key ndis_key;
@@ -1387,13 +1387,15 @@ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
netdev_dbg(usbdev->net, "%s(idx: %d, len: %d)\n",
__func__, index, key_len);
- if ((key_len != 5 && key_len != 13) || index < 0 || index > 3)
+ if (index >= RNDIS_WLAN_NUM_KEYS)
return -EINVAL;
if (key_len == 5)
cipher = WLAN_CIPHER_SUITE_WEP40;
- else
+ else if (key_len == 13)
cipher = WLAN_CIPHER_SUITE_WEP104;
+ else
+ return -EINVAL;
memset(&ndis_key, 0, sizeof(ndis_key));
@@ -1428,7 +1430,7 @@ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
}
static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
- int index, const u8 *addr, const u8 *rx_seq,
+ u8 index, const u8 *addr, const u8 *rx_seq,
int seq_len, u32 cipher, __le32 flags)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
@@ -1436,7 +1438,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
bool is_addr_ok;
int ret;
- if (index < 0 || index >= 4) {
+ if (index >= RNDIS_WLAN_NUM_KEYS) {
netdev_dbg(usbdev->net, "%s(): index out of range (%i)\n",
__func__, index);
return -EINVAL;
@@ -1524,7 +1526,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
return 0;
}
-static int restore_key(struct usbnet *usbdev, int key_idx)
+static int restore_key(struct usbnet *usbdev, u8 key_idx)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct rndis_wlan_encr_key key;
@@ -1550,13 +1552,13 @@ static void restore_keys(struct usbnet *usbdev)
restore_key(usbdev, i);
}
-static void clear_key(struct rndis_wlan_private *priv, int idx)
+static void clear_key(struct rndis_wlan_private *priv, u8 idx)
{
memset(&priv->encr_keys[idx], 0, sizeof(priv->encr_keys[idx]));
}
/* remove_key is for both wep and wpa */
-static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
+static int remove_key(struct usbnet *usbdev, u8 index, const u8 *bssid)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ndis_80211_remove_key remove_key;
@@ -1790,9 +1792,9 @@ static struct ndis_80211_pmkid *remove_pmkid(struct usbnet *usbdev,
struct cfg80211_pmksa *pmksa,
int max_pmkids)
{
- int i, len, count, newlen, err;
+ int i, newlen, err;
+ unsigned int count;
- len = le32_to_cpu(pmkids->length);
count = le32_to_cpu(pmkids->bssid_info_count);
if (count > max_pmkids)
@@ -1831,9 +1833,9 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,
struct cfg80211_pmksa *pmksa,
int max_pmkids)
{
- int i, err, len, count, newlen;
+ int i, err, newlen;
+ unsigned int count;
- len = le32_to_cpu(pmkids->length);
count = le32_to_cpu(pmkids->bssid_info_count);
if (count > max_pmkids)
@@ -2683,7 +2685,7 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
s32 signal;
u64 timestamp;
u16 capability;
- u16 beacon_interval = 0;
+ u32 beacon_period = 0;
__le32 rssi;
u8 ie_buf[34];
int len, ret, ie_len;
@@ -2708,7 +2710,7 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
}
/* Get channel and beacon interval */
- channel = get_current_channel(usbdev, &beacon_interval);
+ channel = get_current_channel(usbdev, &beacon_period);
if (!channel) {
netdev_warn(usbdev->net, "%s(): could not get channel.\n",
__func__);
@@ -2738,11 +2740,11 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
netdev_dbg(usbdev->net, "%s(): channel:%d(freq), bssid:[%pM], tsf:%d, "
"capa:%x, beacon int:%d, resp_ie(len:%d, essid:'%.32s'), "
"signal:%d\n", __func__, (channel ? channel->center_freq : -1),
- bssid, (u32)timestamp, capability, beacon_interval, ie_len,
+ bssid, (u32)timestamp, capability, beacon_period, ie_len,
ssid.essid, signal);
bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
- timestamp, capability, beacon_interval, ie_buf, ie_len,
+ timestamp, capability, beacon_period, ie_buf, ie_len,
signal, GFP_KERNEL);
cfg80211_put_bss(bss);
}
@@ -2755,9 +2757,10 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ndis_80211_assoc_info *info = NULL;
u8 bssid[ETH_ALEN];
- int resp_ie_len, req_ie_len;
+ unsigned int resp_ie_len, req_ie_len;
+ unsigned int offset;
u8 *req_ie, *resp_ie;
- int ret, offset;
+ int ret;
bool roamed = false;
bool match_bss;
@@ -2785,7 +2788,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
ret = get_association_info(usbdev, info, CONTROL_BUFFER_SIZE);
if (!ret) {
req_ie_len = le32_to_cpu(info->req_ie_length);
- if (req_ie_len > 0) {
+ if (req_ie_len > CONTROL_BUFFER_SIZE)
+ req_ie_len = CONTROL_BUFFER_SIZE;
+ if (req_ie_len != 0) {
offset = le32_to_cpu(info->offset_req_ies);
if (offset > CONTROL_BUFFER_SIZE)
@@ -2799,7 +2804,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
}
resp_ie_len = le32_to_cpu(info->resp_ie_length);
- if (resp_ie_len > 0) {
+ if (resp_ie_len > CONTROL_BUFFER_SIZE)
+ resp_ie_len = CONTROL_BUFFER_SIZE;
+ if (resp_ie_len != 0) {
offset = le32_to_cpu(info->offset_resp_ies);
if (offset > CONTROL_BUFFER_SIZE)
@@ -3038,7 +3045,7 @@ static void rndis_wlan_media_specific_indication(struct usbnet *usbdev,
struct rndis_indicate *msg, int buflen)
{
struct ndis_80211_status_indication *indication;
- int len, offset;
+ unsigned int len, offset;
offset = offsetof(struct rndis_indicate, status) +
le32_to_cpu(msg->offset);
@@ -3050,7 +3057,7 @@ static void rndis_wlan_media_specific_indication(struct usbnet *usbdev,
return;
}
- if (offset + len > buflen) {
+ if (len > buflen || offset > buflen || offset + len > buflen) {
netdev_info(usbdev->net, "media specific indication, too large to fit to buffer (%i > %i)\n",
offset + len, buflen);
return;
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index a0a7854facc0..299c3879582d 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -163,7 +163,7 @@ config RT2800USB_RT53XX
depends on EXPERIMENTAL
---help---
This adds support for rt53xx wireless chipset family to the
- rt2800pci driver.
+ rt2800usb driver.
Supported chips: RT5370
config RT2800USB_UNKNOWN
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 2571a2fa3d09..063bfa8b91f4 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -68,6 +68,7 @@
#define RF3322 0x000c
#define RF3053 0x000d
#define RF5370 0x5370
+#define RF5372 0x5372
#define RF5390 0x5390
/*
@@ -965,6 +966,7 @@
* TX_PIN_CFG:
*/
#define TX_PIN_CFG 0x1328
+#define TX_PIN_CFG_PA_PE_DISABLE 0xfcfffff0
#define TX_PIN_CFG_PA_PE_A0_EN FIELD32(0x00000001)
#define TX_PIN_CFG_PA_PE_G0_EN FIELD32(0x00000002)
#define TX_PIN_CFG_PA_PE_A1_EN FIELD32(0x00000004)
@@ -985,6 +987,14 @@
#define TX_PIN_CFG_RFTR_POL FIELD32(0x00020000)
#define TX_PIN_CFG_TRSW_EN FIELD32(0x00040000)
#define TX_PIN_CFG_TRSW_POL FIELD32(0x00080000)
+#define TX_PIN_CFG_PA_PE_A2_EN FIELD32(0x01000000)
+#define TX_PIN_CFG_PA_PE_G2_EN FIELD32(0x02000000)
+#define TX_PIN_CFG_PA_PE_A2_POL FIELD32(0x04000000)
+#define TX_PIN_CFG_PA_PE_G2_POL FIELD32(0x08000000)
+#define TX_PIN_CFG_LNA_PE_A2_EN FIELD32(0x10000000)
+#define TX_PIN_CFG_LNA_PE_G2_EN FIELD32(0x20000000)
+#define TX_PIN_CFG_LNA_PE_A2_POL FIELD32(0x40000000)
+#define TX_PIN_CFG_LNA_PE_G2_POL FIELD32(0x80000000)
/*
* TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz
@@ -1627,6 +1637,7 @@ struct mac_iveiv_entry {
/*
* H2M_MAILBOX_CSR: Host-to-MCU Mailbox.
+ * CMD_TOKEN: Command id, 0xff disables status reporting.
*/
#define H2M_MAILBOX_CSR 0x7010
#define H2M_MAILBOX_CSR_ARG0 FIELD32(0x000000ff)
@@ -1636,6 +1647,8 @@ struct mac_iveiv_entry {
/*
* H2M_MAILBOX_CID:
+ * Free slots contain 0xff. The MCU stores a command's token in the lowest
+ * free slot. If all slots are occupied, the status report is dropped.
*/
#define H2M_MAILBOX_CID 0x7014
#define H2M_MAILBOX_CID_CMD0 FIELD32(0x000000ff)
@@ -1645,6 +1658,7 @@ struct mac_iveiv_entry {
/*
* H2M_MAILBOX_STATUS:
+ * The command status is saved to the same slot as the command id.
*/
#define H2M_MAILBOX_STATUS 0x701c
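
The new comments above describe the mailbox handshake: the host picks a token, the MCU parks that token in the lowest free CID byte slot (free slots hold 0xff), and the matching STATUS byte slot later carries the result; token 0xff suppresses reporting entirely. A rough host-side polling sketch of that scheme follows. The register accessors are stand-ins, and clearing both registers back to 0xff after a hit is an assumption borrowed from the rt2800pci hunks further down, not something this header guarantees.

#include <stdio.h>

#define H2M_MAILBOX_CID		0x7014
#define H2M_MAILBOX_STATUS	0x701c

/* Dummy register file standing in for the driver's MMIO accessors. */
static unsigned int regs[0x8000 / 4];

static unsigned int reg_read32(unsigned int offset)
{
	return regs[offset / 4];
}

static void reg_write32(unsigned int offset, unsigned int value)
{
	regs[offset / 4] = value;
}

/*
 * Wait for 'token' to appear in one of the four CID byte slots, then return
 * the status byte kept in the matching STATUS slot.  Token 0xff never gets a
 * status report, so reject it up front.  Returns -1 on timeout.
 */
static int wait_mcu_status(unsigned char token, unsigned int retries)
{
	unsigned int cid, status, slot;

	if (token == 0xff)
		return -1;

	while (retries--) {
		cid = reg_read32(H2M_MAILBOX_CID);
		for (slot = 0; slot < 4; slot++) {
			if (((cid >> (8 * slot)) & 0xff) != token)
				continue;
			status = reg_read32(H2M_MAILBOX_STATUS);
			/* Mark every slot free again (free slots hold 0xff). */
			reg_write32(H2M_MAILBOX_STATUS, ~0u);
			reg_write32(H2M_MAILBOX_CID, ~0u);
			return (int)((status >> (8 * slot)) & 0xff);
		}
	}
	return -1;
}

int main(void)
{
	/* Pretend the MCU parked token 0x42 in slot 1 with status 1 (success). */
	reg_write32(H2M_MAILBOX_CID, 0xffff42ffu);
	reg_write32(H2M_MAILBOX_STATUS, 0xffff01ffu);
	printf("status: %d\n", wait_mcu_status(0x42, 10));
	return 0;
}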
@@ -1796,6 +1810,14 @@ struct mac_iveiv_entry {
#define RFCSR2_RESCAL_EN FIELD8(0x80)
/*
+ * RFCSR 3:
+ */
+#define RFCSR3_K FIELD8(0x0f)
+/* Bits [7-4] for RF3320 (RT3370/RT3390), on other chipsets reserved */
+#define RFCSR3_PA1_BIAS_CCK FIELD8(0x70)
+#define RFCSR3_PA2_CASCODE_BIAS_CCKK FIELD8(0x80)
+
+/*
* FRCSR 5:
*/
#define RFCSR5_R1 FIELD8(0x0c)
@@ -1811,10 +1833,12 @@ struct mac_iveiv_entry {
* RFCSR 7:
*/
#define RFCSR7_RF_TUNING FIELD8(0x01)
-#define RFCSR7_R02 FIELD8(0x07)
-#define RFCSR7_R3 FIELD8(0x08)
-#define RFCSR7_R45 FIELD8(0x30)
-#define RFCSR7_R67 FIELD8(0xc0)
+#define RFCSR7_BIT1 FIELD8(0x02)
+#define RFCSR7_BIT2 FIELD8(0x04)
+#define RFCSR7_BIT3 FIELD8(0x08)
+#define RFCSR7_BIT4 FIELD8(0x10)
+#define RFCSR7_BIT5 FIELD8(0x20)
+#define RFCSR7_BITS67 FIELD8(0xc0)
/*
* RFCSR 11:
@@ -1839,6 +1863,11 @@ struct mac_iveiv_entry {
#define RFCSR15_TX_LO2_EN FIELD8(0x08)
/*
+ * RFCSR 16:
+ */
+#define RFCSR16_TXMIXER_GAIN FIELD8(0x07)
+
+/*
* RFCSR 17:
*/
#define RFCSR17_TXMIXER_GAIN FIELD8(0x07)
@@ -1867,6 +1896,13 @@ struct mac_iveiv_entry {
#define RFCSR23_FREQ_OFFSET FIELD8(0x7f)
/*
+ * RFCSR 24:
+ */
+#define RFCSR24_TX_AGC_FC FIELD8(0x1f)
+#define RFCSR24_TX_H20M FIELD8(0x20)
+#define RFCSR24_TX_CALIB FIELD8(0x7f)
+
+/*
* RFCSR 27:
*/
#define RFCSR27_R1 FIELD8(0x03)
@@ -1887,6 +1923,7 @@ struct mac_iveiv_entry {
*/
#define RFCSR31_RX_AGC_FC FIELD8(0x1f)
#define RFCSR31_RX_H20M FIELD8(0x20)
+#define RFCSR31_RX_CALIB FIELD8(0x7f)
/*
* RFCSR 38:
@@ -2093,6 +2130,12 @@ struct mac_iveiv_entry {
#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
/*
+ * EEPROM TXMIXER GAIN A offset (note overlaps with EEPROM RSSI A2).
+ */
+#define EEPROM_TXMIXER_GAIN_A 0x0026
+#define EEPROM_TXMIXER_GAIN_A_VAL FIELD16(0x0007)
+
+/*
* EEPROM EIRP Maximum TX power values(unit: dbm)
*/
#define EEPROM_EIRP_MAX_TX_POWER 0x0027
@@ -2259,6 +2302,12 @@ struct mac_iveiv_entry {
/*
* MCU mailbox commands.
+ * MCU_SLEEP - go to power-save mode.
+ * arg1: 1: save as much power as possible, 0: save less power.
+ * status: 1: success, 2: already asleep,
+ * 3: MAC may be busy, so the command could not be completed.
+ * MCU_RADIO_OFF
+ * arg0: 0: do power-saving only, do NOT turn off the radio.
*/
#define MCU_SLEEP 0x30
#define MCU_WAKEUP 0x31
@@ -2279,7 +2328,10 @@ struct mac_iveiv_entry {
/*
* MCU mailbox tokens
*/
-#define TOKEN_WAKUP 3
+#define TOKEN_SLEEP 1
+#define TOKEN_RADIO_OFF 2
+#define TOKEN_WAKEUP 3
+
/*
* DMA descriptor defines.
@@ -2422,4 +2474,23 @@ struct mac_iveiv_entry {
*/
#define EIRP_MAX_TX_POWER_LIMIT 0x50
+/*
+ * Number of TBTT intervals after which we have to adjust
+ * the hw beacon timer.
+ */
+#define BCN_TBTT_OFFSET 64
+
+/*
+ * RT2800 driver data structure
+ */
+struct rt2800_drv_data {
+ u8 calibration_bw20;
+ u8 calibration_bw40;
+ u8 bbp25;
+ u8 bbp26;
+ u8 txmixer_gain_24g;
+ u8 txmixer_gain_5g;
+ unsigned int tbtt_tick;
+};
+
#endif /* RT2800_H */
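
struct rt2800_drv_data above gives the rt2800 family a private per-device blob (calibration values, saved BBP registers, the TBTT tick) instead of fields in the shared rt2x00 device structure; the rt2800pci hunk later in this series wires it up through a new .drv_data_size field and the code reaches it via rt2x00dev->drv_data. A generic sketch of that allocation pattern, assuming the blob is simply appended to the core context; the names and the exact allocation path are illustrative, the rt2x00 core's real code is not shown in this diff.

#include <stdlib.h>
#include <string.h>

/* Generic stand-ins for the device and ops structures. */
struct dev_ops {
	size_t drv_data_size;	/* chipset-family private data size */
};

struct device_ctx {
	const struct dev_ops *ops;
	void *drv_data;		/* points at the family-private blob */
};

/* Allocate the context together with the opaque driver-data area. */
static struct device_ctx *ctx_alloc(const struct dev_ops *ops)
{
	struct device_ctx *ctx;

	ctx = malloc(sizeof(*ctx) + ops->drv_data_size);
	if (!ctx)
		return NULL;
	memset(ctx, 0, sizeof(*ctx) + ops->drv_data_size);
	ctx->ops = ops;
	ctx->drv_data = ctx + 1;	/* blob lives right after the context */
	return ctx;
}

int main(void)
{
	static const struct dev_ops ops = { .drv_data_size = 32 };
	struct device_ctx *ctx = ctx_alloc(&ops);

	if (!ctx)
		return 1;
	free(ctx);
	return 0;
}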
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 7bef66def10c..6c0a12ea6a15 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -402,7 +402,8 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
if (rt2x00_is_pci(rt2x00dev)) {
if (rt2x00_rt(rt2x00dev, RT3572) ||
- rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
@@ -412,18 +413,6 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
}
/*
- * Disable DMA, will be reenabled later when enabling
- * the radio.
- */
- rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
- rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
- rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
- rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
- rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
- rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
- rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
-
- /*
* Write firmware to the device.
*/
rt2800_drv_write_firmware(rt2x00dev, data, len);
@@ -444,10 +433,21 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
}
/*
+ * Disable DMA, will be reenabled later when enabling
+ * the radio.
+ */
+ rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
+ rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+
+ /*
* Initialize firmware.
*/
rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
+ if (rt2x00_is_usb(rt2x00dev))
+ rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0);
msleep(1);
return 0;
@@ -1646,10 +1646,14 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
struct rf_channel *rf,
struct channel_info *info)
{
- u8 rfcsr;
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+ u8 rfcsr, calib_tx, calib_rx;
rt2800_rfcsr_write(rt2x00dev, 2, rf->rf1);
- rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3);
+
+ rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR3_K, rf->rf3);
+ rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR6_R1, rf->rf2);
@@ -1663,16 +1667,82 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER, info->default_power2);
rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
+ rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
+ if (rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD,
+ rt2x00dev->default_ant.rx_chain_num == 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD,
+ rt2x00dev->default_ant.tx_chain_num == 1);
+ } else {
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
+
+ switch (rt2x00dev->default_ant.tx_chain_num) {
+ case 1:
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+ /* fall through */
+ case 2:
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
+ break;
+ }
+
+ switch (rt2x00dev->default_ant.rx_chain_num) {
+ case 1:
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+ /* fall through */
+ case 2:
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
+ break;
+ }
+ }
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+ msleep(1);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
- rt2800_rfcsr_write(rt2x00dev, 24,
- rt2x00dev->calibration[conf_is_ht40(conf)]);
+ if (rt2x00_rt(rt2x00dev, RT3390)) {
+ calib_tx = conf_is_ht40(conf) ? 0x68 : 0x4f;
+ calib_rx = conf_is_ht40(conf) ? 0x6f : 0x4f;
+ } else {
+ if (conf_is_ht40(conf)) {
+ calib_tx = drv_data->calibration_bw40;
+ calib_rx = drv_data->calibration_bw40;
+ } else {
+ calib_tx = drv_data->calibration_bw20;
+ calib_rx = drv_data->calibration_bw20;
+ }
+ }
+
+ rt2800_rfcsr_read(rt2x00dev, 24, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR24_TX_CALIB, calib_tx);
+ rt2800_rfcsr_write(rt2x00dev, 24, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 31, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR31_RX_CALIB, calib_rx);
+ rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+ msleep(1);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
}
static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
@@ -1680,12 +1750,13 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
struct rf_channel *rf,
struct channel_info *info)
{
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
u8 rfcsr;
u32 reg;
if (rf->channel <= 14) {
- rt2800_bbp_write(rt2x00dev, 25, 0x15);
- rt2800_bbp_write(rt2x00dev, 26, 0x85);
+ rt2800_bbp_write(rt2x00dev, 25, drv_data->bbp25);
+ rt2800_bbp_write(rt2x00dev, 26, drv_data->bbp26);
} else {
rt2800_bbp_write(rt2x00dev, 25, 0x09);
rt2800_bbp_write(rt2x00dev, 26, 0xff);
@@ -1713,8 +1784,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
if (rf->channel <= 14) {
rt2x00_set_field8(&rfcsr, RFCSR12_DR0, 3);
rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
- (info->default_power1 & 0x3) |
- ((info->default_power1 & 0xC) << 1));
+ info->default_power1);
} else {
rt2x00_set_field8(&rfcsr, RFCSR12_DR0, 7);
rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
@@ -1727,8 +1797,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
if (rf->channel <= 14) {
rt2x00_set_field8(&rfcsr, RFCSR13_DR0, 3);
rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
- (info->default_power2 & 0x3) |
- ((info->default_power2 & 0xC) << 1));
+ info->default_power2);
} else {
rt2x00_set_field8(&rfcsr, RFCSR13_DR0, 7);
rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
@@ -1738,11 +1807,12 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
if (rf->channel <= 14) {
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
@@ -1773,10 +1843,13 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
- rt2800_rfcsr_write(rt2x00dev, 24,
- rt2x00dev->calibration[conf_is_ht40(conf)]);
- rt2800_rfcsr_write(rt2x00dev, 31,
- rt2x00dev->calibration[conf_is_ht40(conf)]);
+ if (conf_is_ht40(conf)) {
+ rt2800_rfcsr_write(rt2x00dev, 24, drv_data->calibration_bw40);
+ rt2800_rfcsr_write(rt2x00dev, 31, drv_data->calibration_bw40);
+ } else {
+ rt2800_rfcsr_write(rt2x00dev, 24, drv_data->calibration_bw20);
+ rt2800_rfcsr_write(rt2x00dev, 31, drv_data->calibration_bw20);
+ }
if (rf->channel <= 14) {
rt2800_rfcsr_write(rt2x00dev, 7, 0xd8);
@@ -1784,7 +1857,10 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 10, 0xf1);
rt2800_rfcsr_write(rt2x00dev, 11, 0xb9);
rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
- rt2800_rfcsr_write(rt2x00dev, 16, 0x4c);
+ rfcsr = 0x4c;
+ rt2x00_set_field8(&rfcsr, RFCSR16_TXMIXER_GAIN,
+ drv_data->txmixer_gain_24g);
+ rt2800_rfcsr_write(rt2x00dev, 16, rfcsr);
rt2800_rfcsr_write(rt2x00dev, 17, 0x23);
rt2800_rfcsr_write(rt2x00dev, 19, 0x93);
rt2800_rfcsr_write(rt2x00dev, 20, 0xb3);
@@ -1793,12 +1869,20 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
rt2800_rfcsr_write(rt2x00dev, 29, 0x9b);
} else {
- rt2800_rfcsr_write(rt2x00dev, 7, 0x14);
+ rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR7_BIT2, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR7_BIT3, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR7_BIT4, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR7_BITS67, 0);
+ rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
rt2800_rfcsr_write(rt2x00dev, 9, 0xc0);
rt2800_rfcsr_write(rt2x00dev, 10, 0xf1);
rt2800_rfcsr_write(rt2x00dev, 11, 0x00);
rt2800_rfcsr_write(rt2x00dev, 15, 0x43);
- rt2800_rfcsr_write(rt2x00dev, 16, 0x7a);
+ rfcsr = 0x7a;
+ rt2x00_set_field8(&rfcsr, RFCSR16_TXMIXER_GAIN,
+ drv_data->txmixer_gain_5g);
+ rt2800_rfcsr_write(rt2x00dev, 16, rfcsr);
rt2800_rfcsr_write(rt2x00dev, 17, 0x23);
if (rf->channel <= 64) {
rt2800_rfcsr_write(rt2x00dev, 19, 0xb7);
@@ -1906,7 +1990,8 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
r55_nonbt_rev[idx]);
rt2800_rfcsr_write(rt2x00dev, 59,
r59_nonbt_rev[idx]);
- } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+ } else if (rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392)) {
static const char r59_non_bt[] = {0x8f, 0x8f,
0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
@@ -1956,6 +2041,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
break;
case RF5370:
+ case RF5372:
case RF5390:
rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
break;
@@ -1972,7 +2058,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 86, 0);
if (rf->channel <= 14) {
- if (!rt2x00_rt(rt2x00dev, RT5390)) {
+ if (!rt2x00_rt(rt2x00dev, RT5390) &&
+ !rt2x00_rt(rt2x00dev, RT5392)) {
if (test_bit(CAPABILITY_EXTERNAL_LNA_BG,
&rt2x00dev->cap_flags)) {
rt2800_bbp_write(rt2x00dev, 82, 0x62);
@@ -2414,6 +2501,80 @@ void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev)
}
EXPORT_SYMBOL_GPL(rt2800_gain_calibration);
+void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
+{
+ u32 tx_pin;
+ u8 rfcsr;
+
+ /*
+ * A voltage-controlled oscillator (VCO) is an electronic oscillator
+ * whose oscillation frequency is controlled by a voltage input.
+ * Temperature changes can shift that frequency, so this VCO
+ * calibration is run periodically to keep the frequency accurate.
+ */
+
+ rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
+ tx_pin &= TX_PIN_CFG_PA_PE_DISABLE;
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
+
+ switch (rt2x00dev->chip.rf) {
+ case RF2020:
+ case RF3020:
+ case RF3021:
+ case RF3022:
+ case RF3320:
+ case RF3052:
+ rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
+ rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
+ break;
+ case RF5370:
+ case RF5372:
+ case RF5390:
+ rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
+ rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
+ break;
+ default:
+ return;
+ }
+
+ mdelay(1);
+
+ rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
+ if (rt2x00dev->rf_channel <= 14) {
+ switch (rt2x00dev->default_ant.tx_chain_num) {
+ case 3:
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G2_EN, 1);
+ /* fall through */
+ case 2:
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
+ /* fall through */
+ case 1:
+ default:
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
+ break;
+ }
+ } else {
+ switch (rt2x00dev->default_ant.tx_chain_num) {
+ case 3:
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A2_EN, 1);
+ /* fall through */
+ case 2:
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
+ /* fall through */
+ case 1:
+ default:
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, 1);
+ break;
+ }
+ }
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
+
+}
+EXPORT_SYMBOL_GPL(rt2800_vco_calibration);
+
static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_conf *libconf)
{
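
rt2800_vco_calibration() above is exported and, in the rt2800pci hunk below, hooked up as a new .vco_calibration callback; chips flagged with CAPABILITY_VCO_RECALIBRATION presumably have it invoked periodically so temperature drift keeps getting trimmed out. One plausible way to drive such periodic recalibration is a self-rearming delayed work item, sketched below with made-up names; this is not the rt2x00 core's actual scheduling code.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define VCO_CAL_INTERVAL	(10 * HZ)	/* illustrative interval */

struct cal_ctx {
	struct delayed_work work;
	void (*vco_calibration)(struct cal_ctx *ctx);	/* chipset hook */
};

static void vco_cal_worker(struct work_struct *work)
{
	struct cal_ctx *ctx = container_of(work, struct cal_ctx, work.work);

	if (ctx->vco_calibration)
		ctx->vco_calibration(ctx);

	/* Re-arm so the frequency keeps getting trimmed as temperature drifts. */
	schedule_delayed_work(&ctx->work, VCO_CAL_INTERVAL);
}

static void vco_cal_start(struct cal_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->work, vco_cal_worker);
	schedule_delayed_work(&ctx->work, VCO_CAL_INTERVAL);
}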
@@ -2502,7 +2663,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3390) ||
- rt2x00_rt(rt2x00dev, RT5390))
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
return 0x1c + (2 * rt2x00dev->lna_gain);
else
return 0x2e + rt2x00dev->lna_gain;
@@ -2637,7 +2799,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
} else if (rt2x00_rt(rt2x00dev, RT3572)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
- } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+ } else if (rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -3013,7 +3176,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_wait_bbp_ready(rt2x00dev)))
return -EACCES;
- if (rt2x00_rt(rt2x00dev, RT5390)) {
+ if (rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_bbp_read(rt2x00dev, 4, &value);
rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
rt2800_bbp_write(rt2x00dev, 4, value);
@@ -3021,19 +3185,22 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
if (rt2800_is_305x_soc(rt2x00dev) ||
rt2x00_rt(rt2x00dev, RT3572) ||
- rt2x00_rt(rt2x00dev, RT5390))
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 31, 0x08);
rt2800_bbp_write(rt2x00dev, 65, 0x2c);
rt2800_bbp_write(rt2x00dev, 66, 0x38);
- if (rt2x00_rt(rt2x00dev, RT5390))
+ if (rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 68, 0x0b);
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
rt2800_bbp_write(rt2x00dev, 69, 0x16);
rt2800_bbp_write(rt2x00dev, 73, 0x12);
- } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+ } else if (rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x13);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
@@ -3051,7 +3218,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT3572) ||
- rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_bbp_write(rt2x00dev, 79, 0x13);
rt2800_bbp_write(rt2x00dev, 80, 0x05);
rt2800_bbp_write(rt2x00dev, 81, 0x33);
@@ -3063,64 +3231,88 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
}
rt2800_bbp_write(rt2x00dev, 82, 0x62);
- if (rt2x00_rt(rt2x00dev, RT5390))
+ if (rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 83, 0x7a);
else
rt2800_bbp_write(rt2x00dev, 83, 0x6a);
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
rt2800_bbp_write(rt2x00dev, 84, 0x19);
- else if (rt2x00_rt(rt2x00dev, RT5390))
+ else if (rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 84, 0x9a);
else
rt2800_bbp_write(rt2x00dev, 84, 0x99);
- if (rt2x00_rt(rt2x00dev, RT5390))
+ if (rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 86, 0x38);
else
rt2800_bbp_write(rt2x00dev, 86, 0x00);
+ if (rt2x00_rt(rt2x00dev, RT5392))
+ rt2800_bbp_write(rt2x00dev, 88, 0x90);
+
rt2800_bbp_write(rt2x00dev, 91, 0x04);
- if (rt2x00_rt(rt2x00dev, RT5390))
+ if (rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 92, 0x02);
else
rt2800_bbp_write(rt2x00dev, 92, 0x00);
+ if (rt2x00_rt(rt2x00dev, RT5392)) {
+ rt2800_bbp_write(rt2x00dev, 95, 0x9a);
+ rt2800_bbp_write(rt2x00dev, 98, 0x12);
+ }
+
if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392) ||
rt2800_is_305x_soc(rt2x00dev))
rt2800_bbp_write(rt2x00dev, 103, 0xc0);
else
rt2800_bbp_write(rt2x00dev, 103, 0x00);
- if (rt2x00_rt(rt2x00dev, RT5390))
+ if (rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 104, 0x92);
if (rt2800_is_305x_soc(rt2x00dev))
rt2800_bbp_write(rt2x00dev, 105, 0x01);
- else if (rt2x00_rt(rt2x00dev, RT5390))
+ else if (rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 105, 0x3c);
else
rt2800_bbp_write(rt2x00dev, 105, 0x05);
if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 106, 0x03);
+ else if (rt2x00_rt(rt2x00dev, RT5392))
+ rt2800_bbp_write(rt2x00dev, 106, 0x12);
else
rt2800_bbp_write(rt2x00dev, 106, 0x35);
- if (rt2x00_rt(rt2x00dev, RT5390))
+ if (rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 128, 0x12);
+ if (rt2x00_rt(rt2x00dev, RT5392)) {
+ rt2800_bbp_write(rt2x00dev, 134, 0xd0);
+ rt2800_bbp_write(rt2x00dev, 135, 0xf6);
+ }
+
if (rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT3572) ||
- rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_bbp_read(rt2x00dev, 138, &value);
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
@@ -3132,7 +3324,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 138, value);
}
- if (rt2x00_rt(rt2x00dev, RT5390)) {
+ if (rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392)) {
int ant, div_mode;
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
@@ -3247,6 +3440,7 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
{
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
u8 rfcsr;
u8 bbp;
u32 reg;
@@ -3258,13 +3452,15 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
!rt2x00_rt(rt2x00dev, RT3390) &&
!rt2x00_rt(rt2x00dev, RT3572) &&
!rt2x00_rt(rt2x00dev, RT5390) &&
+ !rt2x00_rt(rt2x00dev, RT5392) &&
!rt2800_is_305x_soc(rt2x00dev))
return 0;
/*
* Init RF calibration.
*/
- if (rt2x00_rt(rt2x00dev, RT5390)) {
+ if (rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
@@ -3482,6 +3678,66 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
+ } else if (rt2x00_rt(rt2x00dev, RT5392)) {
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x17);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x4d);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0x8d);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x0b);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x44);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x82);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0xC0);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
+ rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x89);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x0f);
+ rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
+ rt2800_rfcsr_write(rt2x00dev, 42, 0xd5);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x9b);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
+ rt2800_rfcsr_write(rt2x00dev, 47, 0x0c);
+ rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 49, 0x94);
+ rt2800_rfcsr_write(rt2x00dev, 50, 0x94);
+ rt2800_rfcsr_write(rt2x00dev, 51, 0x3a);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x48);
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x44);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0x38);
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0xa1);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x39);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x07);
+ rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
+ rt2800_rfcsr_write(rt2x00dev, 61, 0x91);
+ rt2800_rfcsr_write(rt2x00dev, 62, 0x39);
+ rt2800_rfcsr_write(rt2x00dev, 63, 0x07);
}
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -3535,21 +3791,28 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
* Set RX Filter calibration for 20MHz and 40MHz
*/
if (rt2x00_rt(rt2x00dev, RT3070)) {
- rt2x00dev->calibration[0] =
+ drv_data->calibration_bw20 =
rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
- rt2x00dev->calibration[1] =
+ drv_data->calibration_bw40 =
rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
} else if (rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT3572)) {
- rt2x00dev->calibration[0] =
+ drv_data->calibration_bw20 =
rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x13);
- rt2x00dev->calibration[1] =
+ drv_data->calibration_bw40 =
rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
}
- if (!rt2x00_rt(rt2x00dev, RT5390)) {
+ /*
+ * Save BBP 25 & 26 values for later use in channel switching
+ */
+ rt2800_bbp_read(rt2x00dev, 25, &drv_data->bbp25);
+ rt2800_bbp_read(rt2x00dev, 26, &drv_data->bbp26);
+
+ if (!rt2x00_rt(rt2x00dev, RT5390) &&
+ !rt2x00_rt(rt2x00dev, RT5392)) {
/*
* Set back to initial state
*/
@@ -3577,7 +3840,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
- if (!rt2x00_rt(rt2x00dev, RT5390)) {
+ if (!rt2x00_rt(rt2x00dev, RT5390) &&
+ !rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
if (rt2x00_rt(rt2x00dev, RT3070) ||
@@ -3588,11 +3852,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
&rt2x00dev->cap_flags))
rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
- if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
- rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
- rt2x00_get_field16(eeprom,
- EEPROM_TXMIXER_GAIN_BG_VAL));
+ rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
+ drv_data->txmixer_gain_24g);
rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
}
@@ -3645,7 +3906,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
}
- if (rt2x00_rt(rt2x00dev, RT5390)) {
+ if (rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
@@ -3800,6 +4062,7 @@ EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse);
int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
{
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
u16 word;
u8 *mac;
u8 default_lna_gain;
@@ -3883,6 +4146,14 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0);
rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &word);
+ if ((word & 0x00ff) != 0x00ff) {
+ drv_data->txmixer_gain_24g =
+ rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_BG_VAL);
+ } else {
+ drv_data->txmixer_gain_24g = 0;
+ }
+
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
@@ -3892,6 +4163,14 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
default_lna_gain);
rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &word);
+ if ((word & 0x00ff) != 0x00ff) {
+ drv_data->txmixer_gain_5g =
+ rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_A_VAL);
+ } else {
+ drv_data->txmixer_gain_5g = 0;
+ }
+
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0);
@@ -3929,7 +4208,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
* RT53xx: defined in "EEPROM_CHIP_ID" field
*/
rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
- if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390)
+ if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390 ||
+ rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5392)
rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value);
else
value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
@@ -3947,9 +4227,10 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RT3390:
case RT3572:
case RT5390:
+ case RT5392:
break;
default:
- ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
+ ERROR(rt2x00dev, "Invalid RT chipset 0x%04x detected.\n", rt2x00dev->chip.rt);
return -ENODEV;
}
@@ -3965,10 +4246,11 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RF3052:
case RF3320:
case RF5370:
+ case RF5372:
case RF5390:
break;
default:
- ERROR(rt2x00dev, "Invalid RF chipset 0x%x detected.\n",
+ ERROR(rt2x00dev, "Invalid RF chipset 0x%04x detected.\n",
rt2x00dev->chip.rf);
return -ENODEV;
}
@@ -4218,7 +4500,9 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_AMPDU_AGGREGATION;
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
/*
* Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
* unless we are capable of sending the buffered frames out after the
@@ -4271,6 +4555,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00_rf(rt2x00dev, RF3022) ||
rt2x00_rf(rt2x00dev, RF3320) ||
rt2x00_rf(rt2x00dev, RF5370) ||
+ rt2x00_rf(rt2x00dev, RF5372) ||
rt2x00_rf(rt2x00dev, RF5390)) {
spec->num_channels = 14;
spec->channels = rf_vals_3x;
@@ -4347,6 +4632,20 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
}
}
+ switch (rt2x00dev->chip.rf) {
+ case RF2020:
+ case RF3020:
+ case RF3021:
+ case RF3022:
+ case RF3320:
+ case RF3052:
+ case RF5370:
+ case RF5372:
+ case RF5390:
+ __set_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags);
+ break;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_probe_hw_mode);
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 8c3c281904fe..419e36cb06be 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -184,6 +184,7 @@ void rt2800_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual);
void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
const u32 count);
void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev);
+void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev);
int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev);
void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index dc88baefa72e..0397bbf0ce01 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -480,7 +480,8 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
if (rt2x00_is_pcie(rt2x00dev) &&
(rt2x00_rt(rt2x00dev, RT3572) ||
- rt2x00_rt(rt2x00dev, RT5390))) {
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))) {
rt2x00pci_register_read(rt2x00dev, AUX_CTRL, &reg);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
@@ -489,7 +490,7 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00pci_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
- rt2x00pci_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ reg = 0;
rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
@@ -501,11 +502,27 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
{
+ int retval;
+
if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800pci_init_queues(rt2x00dev)))
return -EIO;
- return rt2800_enable_radio(rt2x00dev);
+ retval = rt2800_enable_radio(rt2x00dev);
+ if (retval)
+ return retval;
+
+ /* After resume, MCU_BOOT_SIGNAL will trash these. */
+ rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
+ rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
+
+ rt2800_mcu_request(rt2x00dev, MCU_SLEEP, TOKEN_RADIO_OFF, 0xff, 0x02);
+ rt2800pci_mcu_status(rt2x00dev, TOKEN_RADIO_OFF);
+
+ rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKEUP, 0, 0);
+ rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKEUP);
+
+ return retval;
}
static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
@@ -521,14 +538,16 @@ static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
if (state == STATE_AWAKE) {
- rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0x02);
- rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP);
+ rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKEUP,
+ 0, 0x02);
+ rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKEUP);
} else if (state == STATE_SLEEP) {
rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_STATUS,
0xffffffff);
rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID,
0xffffffff);
- rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0x01, 0xff, 0x01);
+ rt2800_mcu_request(rt2x00dev, MCU_SLEEP, TOKEN_SLEEP,
+ 0xff, 0x01);
}
return 0;
@@ -541,13 +560,6 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
switch (state) {
case STATE_RADIO_ON:
- /*
- * Before the radio can be enabled, the device first has
- * to be woken up. After that it needs a bit of time
- * to be fully awake and then the radio can be enabled.
- */
- rt2800pci_set_state(rt2x00dev, STATE_AWAKE);
- msleep(1);
retval = rt2800pci_enable_radio(rt2x00dev);
break;
case STATE_RADIO_OFF:
@@ -797,7 +809,33 @@ static void rt2800pci_pretbtt_tasklet(unsigned long data)
static void rt2800pci_tbtt_tasklet(unsigned long data)
{
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+ u32 reg;
+
rt2x00lib_beacondone(rt2x00dev);
+
+ if (rt2x00dev->intf_ap_count) {
+ /*
+ * The rt2800pci hardware tbtt timer is off by 1us per tbtt, causing
+ * beacon skew that over time confuses some power-saving clients.
+ * Shorten the beacon interval by 64us every 64 beacons to
+ * compensate for this drift.
+ */
+ if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
+ rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
+ (rt2x00dev->beacon_int * 16) - 1);
+ rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
+ rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
+ (rt2x00dev->beacon_int * 16));
+ rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ }
+ drv_data->tbtt_tick++;
+ drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
+ }
+
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
}
@@ -1050,6 +1088,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.reset_tuner = rt2800_reset_tuner,
.link_tuner = rt2800_link_tuner,
.gain_calibration = rt2800_gain_calibration,
+ .vco_calibration = rt2800_vco_calibration,
.start_queue = rt2800pci_start_queue,
.kick_queue = rt2800pci_kick_queue,
.stop_queue = rt2800pci_stop_queue,
@@ -1093,6 +1132,7 @@ static const struct data_queue_desc rt2800pci_queue_bcn = {
static const struct rt2x00_ops rt2800pci_ops = {
.name = KBUILD_MODNAME,
+ .drv_data_size = sizeof(struct rt2800_drv_data),
.max_sta_intf = 1,
.max_ap_intf = 8,
.eeprom_size = EEPROM_SIZE,
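The tbtt_tasklet hunk above works around the hardware firing each TBTT about 1us late: once per 64-beacon cycle it programs a single interval one 1/16-TU unit (64us) short, then restores the nominal value. A standalone sketch of that arithmetic, assuming a 100 TU beacon interval, a 1us/TBTT drift and BCN_TBTT_OFFSET of 64 beacons (illustrative only, not driver code):

#include <stdio.h>

int main(void)
{
	const long nominal_us = 100 * 1024;	/* assumed 100 TU beacon interval */
	const int tbtt_offset = 64;		/* assumed BCN_TBTT_OFFSET */
	long skew_us = 0;
	int tick = 0;
	int beacon;

	for (beacon = 1; beacon <= 1024; beacon++) {
		long programmed_us = nominal_us;

		/* shorten one interval per cycle by one 1/16-TU unit (64us) */
		if (tick == tbtt_offset - 2)
			programmed_us -= 64;

		/* the hardware fires each TBTT roughly 1us late */
		skew_us += (programmed_us + 1) - nominal_us;

		tick = (tick + 1) % tbtt_offset;

		if (beacon % 256 == 0)
			printf("beacon %4d: accumulated skew %ld us\n",
			       beacon, skew_us);
	}
	return 0;
}

Without the correction the skew grows by 1us per beacon; with it the printed value stays near zero.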
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 262ee9eefb6f..cd490abced91 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -114,45 +114,103 @@ static bool rt2800usb_txstatus_pending(struct rt2x00_dev *rt2x00dev)
return false;
}
+static inline bool rt2800usb_entry_txstatus_timeout(struct queue_entry *entry)
+{
+ bool tout;
+
+ if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
+ return false;
+
+ tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
+ if (unlikely(tout))
+ WARNING(entry->queue->rt2x00dev,
+ "TX status timeout for entry %d in queue %d\n",
+ entry->entry_idx, entry->queue->qid);
+ return tout;
+
+}
+
+static bool rt2800usb_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+ struct queue_entry *entry;
+
+ tx_queue_for_each(rt2x00dev, queue) {
+ entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+ if (rt2800usb_entry_txstatus_timeout(entry))
+ return true;
+ }
+ return false;
+}
+
static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
int urb_status, u32 tx_status)
{
+ bool valid;
+
if (urb_status) {
- WARNING(rt2x00dev, "rt2x00usb_register_read_async failed: %d\n", urb_status);
- return false;
+ WARNING(rt2x00dev, "TX status read failed %d\n", urb_status);
+
+ goto stop_reading;
}
- /* try to read all TX_STA_FIFO entries before scheduling txdone_work */
- if (rt2x00_get_field32(tx_status, TX_STA_FIFO_VALID)) {
- if (!kfifo_put(&rt2x00dev->txstatus_fifo, &tx_status)) {
- WARNING(rt2x00dev, "TX status FIFO overrun, "
- "drop tx status report.\n");
- queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
- } else
- return true;
- } else if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo)) {
+ valid = rt2x00_get_field32(tx_status, TX_STA_FIFO_VALID);
+ if (valid) {
+ if (!kfifo_put(&rt2x00dev->txstatus_fifo, &tx_status))
+ WARNING(rt2x00dev, "TX status FIFO overrun\n");
+
queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
+
+ /* Reschedule urb to read TX status again instantly */
+ return true;
} else if (rt2800usb_txstatus_pending(rt2x00dev)) {
- mod_timer(&rt2x00dev->txstatus_timer, jiffies + msecs_to_jiffies(2));
+ /* Read register after 250 us */
+ hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 250000),
+ HRTIMER_MODE_REL);
+ return false;
}
- return false;
+stop_reading:
+ clear_bit(TX_STATUS_READING, &rt2x00dev->flags);
+ /*
+	 * There is a small race window above, between the txstatus pending
+	 * check and clear_bit(): someone could call rt2x00usb_interrupt_txdone()
+	 * in between, so recheck here whether status reading is needed.
+ */
+ if (rt2800usb_txstatus_pending(rt2x00dev) &&
+ test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
+ return true;
+ else
+ return false;
+}
+
+static void rt2800usb_async_read_tx_status(struct rt2x00_dev *rt2x00dev)
+{
+
+ if (test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
+ return;
+
+ /* Read TX_STA_FIFO register after 500 us */
+ hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 500000),
+ HRTIMER_MODE_REL);
}
static void rt2800usb_tx_dma_done(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- rt2x00usb_register_read_async(rt2x00dev, TX_STA_FIFO,
- rt2800usb_tx_sta_fifo_read_completed);
+ rt2800usb_async_read_tx_status(rt2x00dev);
}
-static void rt2800usb_tx_sta_fifo_timeout(unsigned long data)
+static enum hrtimer_restart rt2800usb_tx_sta_fifo_timeout(struct hrtimer *timer)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev =
+ container_of(timer, struct rt2x00_dev, txstatus_timer);
rt2x00usb_register_read_async(rt2x00dev, TX_STA_FIFO,
rt2800usb_tx_sta_fifo_read_completed);
+
+ return HRTIMER_NORESTART;
}
/*
@@ -226,9 +284,7 @@ static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00usb_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
rt2x00usb_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000);
- rt2x00usb_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
-
- rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ reg = 0;
rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
@@ -440,35 +496,26 @@ static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
/*
* TX control handlers
*/
-static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
+static enum txdone_entry_desc_flags
+rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
{
__le32 *txwi;
u32 word;
int wcid, ack, pid;
- int tx_wcid, tx_ack, tx_pid;
-
- if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
- !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) {
- WARNING(entry->queue->rt2x00dev,
- "Data pending for entry %u in queue %u\n",
- entry->entry_idx, entry->queue->qid);
- cond_resched();
- return false;
- }
-
- wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
- ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
- pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
+ int tx_wcid, tx_ack, tx_pid, is_agg;
/*
* This frames has returned with an IO error,
* so the status report is not intended for this
* frame.
*/
- if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) {
- rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
- return false;
- }
+ if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
+ return TXDONE_FAILURE;
+
+ wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
+ ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
+ pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
+ is_agg = rt2x00_get_field32(reg, TX_STA_FIFO_TX_AGGRE);
/*
* Validate if this TX status report is intended for
@@ -481,15 +528,14 @@ static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
- if ((wcid != tx_wcid) || (ack != tx_ack) || (pid != tx_pid)) {
+ if (wcid != tx_wcid || ack != tx_ack || (!is_agg && pid != tx_pid)) {
WARNING(entry->queue->rt2x00dev,
"TX status report missed for queue %d entry %d\n",
- entry->queue->qid, entry->entry_idx);
- rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
- return false;
+ entry->queue->qid, entry->entry_idx);
+ return TXDONE_UNKNOWN;
}
- return true;
+ return TXDONE_SUCCESS;
}
static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
@@ -498,47 +544,44 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
struct queue_entry *entry;
u32 reg;
u8 qid;
+ enum txdone_entry_desc_flags done_status;
while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
-
- /* TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus
- * qid is guaranteed to be one of the TX QIDs
+ /*
+ * TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus qid is
+	 * guaranteed to be one of the TX QIDs.
*/
qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE);
queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
- if (unlikely(!queue)) {
- WARNING(rt2x00dev, "Got TX status for an unavailable "
+
+ if (unlikely(rt2x00queue_empty(queue))) {
+ WARNING(rt2x00dev, "Got TX status for an empty "
"queue %u, dropping\n", qid);
- continue;
+ break;
}
- /*
- * Inside each queue, we process each entry in a chronological
- * order. We first check that the queue is not empty.
- */
- entry = NULL;
- while (!rt2x00queue_empty(queue)) {
- entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- if (rt2800usb_txdone_entry_check(entry, reg))
- break;
- entry = NULL;
+ entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+
+ if (unlikely(test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
+ !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))) {
+ WARNING(rt2x00dev, "Data pending for entry %u "
+ "in queue %u\n", entry->entry_idx, qid);
+ break;
}
- if (entry)
- rt2800_txdone_entry(entry, reg,
- rt2800usb_get_txwi(entry));
+ done_status = rt2800usb_txdone_entry_check(entry, reg);
+ if (likely(done_status == TXDONE_SUCCESS))
+ rt2800_txdone_entry(entry, reg, rt2800usb_get_txwi(entry));
+ else
+ rt2x00lib_txdone_noinfo(entry, done_status);
}
}
-static void rt2800usb_work_txdone(struct work_struct *work)
+static void rt2800usb_txdone_nostatus(struct rt2x00_dev *rt2x00dev)
{
- struct rt2x00_dev *rt2x00dev =
- container_of(work, struct rt2x00_dev, txdone_work);
struct data_queue *queue;
struct queue_entry *entry;
- rt2800usb_txdone(rt2x00dev);
-
/*
* Process any trailing TX status reports for IO failures,
* we loop until we find the first non-IO error entry. This
@@ -556,20 +599,34 @@ static void rt2800usb_work_txdone(struct work_struct *work)
if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
- else if (rt2x00queue_status_timeout(entry))
+ else if (rt2800usb_entry_txstatus_timeout(entry))
rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
else
break;
}
}
+}
- /*
- * The hw may delay sending the packet after DMA complete
- * if the medium is busy, thus the TX_STA_FIFO entry is
- * also delayed -> use a timer to retrieve it.
- */
- if (rt2800usb_txstatus_pending(rt2x00dev))
- mod_timer(&rt2x00dev->txstatus_timer, jiffies + msecs_to_jiffies(2));
+static void rt2800usb_work_txdone(struct work_struct *work)
+{
+ struct rt2x00_dev *rt2x00dev =
+ container_of(work, struct rt2x00_dev, txdone_work);
+
+ while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
+ rt2800usb_txstatus_timeout(rt2x00dev)) {
+
+ rt2800usb_txdone(rt2x00dev);
+
+ rt2800usb_txdone_nostatus(rt2x00dev);
+
+ /*
+ * The hw may delay sending the packet after DMA complete
+ * if the medium is busy, thus the TX_STA_FIFO entry is
+ * also delayed -> use a timer to retrieve it.
+ */
+ if (rt2800usb_txstatus_pending(rt2x00dev))
+ rt2800usb_async_read_tx_status(rt2x00dev);
+ }
}
/*
@@ -711,9 +768,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
__set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
__set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);
- setup_timer(&rt2x00dev->txstatus_timer,
- rt2800usb_tx_sta_fifo_timeout,
- (unsigned long) rt2x00dev);
+	rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout;
/*
* Set the rssi offset.
@@ -783,6 +838,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.reset_tuner = rt2800_reset_tuner,
.link_tuner = rt2800_link_tuner,
.gain_calibration = rt2800_gain_calibration,
+ .vco_calibration = rt2800_vco_calibration,
.watchdog = rt2800usb_watchdog,
.start_queue = rt2800usb_start_queue,
.kick_queue = rt2x00usb_kick_queue,
@@ -814,7 +870,7 @@ static const struct data_queue_desc rt2800usb_queue_rx = {
};
static const struct data_queue_desc rt2800usb_queue_tx = {
- .entry_num = 64,
+ .entry_num = 16,
.data_size = AGGREGATION_SIZE,
.desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_usb),
@@ -829,6 +885,7 @@ static const struct data_queue_desc rt2800usb_queue_bcn = {
static const struct rt2x00_ops rt2800usb_ops = {
.name = KBUILD_MODNAME,
+ .drv_data_size = sizeof(struct rt2800_drv_data),
.max_sta_intf = 1,
.max_ap_intf = 8,
.eeprom_size = EEPROM_SIZE,
@@ -922,6 +979,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c13) },
{ USB_DEVICE(0x07d1, 0x3c15) },
{ USB_DEVICE(0x07d1, 0x3c16) },
+ { USB_DEVICE(0x2001, 0x3c1b) },
/* Draytek */
{ USB_DEVICE(0x07fa, 0x7712) },
/* DVICO */
@@ -1101,12 +1159,26 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x5a57, 0x0284) },
#endif
#ifdef CONFIG_RT2800USB_RT53XX
+ /* Alpha */
+ { USB_DEVICE(0x2001, 0x3c15) },
+ { USB_DEVICE(0x2001, 0x3c19) },
+ /* Arcadyan */
+ { USB_DEVICE(0x043e, 0x7a12) },
/* Azurewave */
{ USB_DEVICE(0x13d3, 0x3329) },
{ USB_DEVICE(0x13d3, 0x3365) },
+ /* LG innotek */
+ { USB_DEVICE(0x043e, 0x7a22) },
+ /* Panasonic */
+ { USB_DEVICE(0x04da, 0x1801) },
+ { USB_DEVICE(0x04da, 0x1800) },
+ /* Philips */
+ { USB_DEVICE(0x0471, 0x2104) },
/* Ralink */
{ USB_DEVICE(0x148f, 0x5370) },
{ USB_DEVICE(0x148f, 0x5372) },
+ /* Unknown */
+ { USB_DEVICE(0x04da, 0x23f6) },
#endif
#ifdef CONFIG_RT2800USB_UNKNOWN
/*
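The new rt2800usb_entry_txstatus_timeout() above declares a TX status report lost once more than 100 ms have passed since the entry's last action. A minimal userspace sketch of that check, with CLOCK_MONOTONIC standing in for jiffies; the 100 ms budget mirrors the driver, the rest is invented for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* analogue of time_after(jiffies, last_action + msecs_to_jiffies(100)) */
static bool txstatus_timeout(long long last_action_ms)
{
	return now_ms() - last_action_ms > 100;
}

int main(void)
{
	long long last_action = now_ms();

	printf("fresh entry:  %s\n",
	       txstatus_timeout(last_action) ? "timed out" : "still pending");

	/* pretend the status report is 150 ms overdue */
	last_action -= 150;
	printf("150 ms later: %s\n",
	       txstatus_timeout(last_action) ? "timed out" : "still pending");
	return 0;
}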
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index b03b22c47b18..471f87cab4ab 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -38,7 +38,7 @@
#include <linux/etherdevice.h>
#include <linux/input-polldev.h>
#include <linux/kfifo.h>
-#include <linux/timer.h>
+#include <linux/hrtimer.h>
#include <net/mac80211.h>
@@ -192,6 +192,7 @@ struct rt2x00_chip {
#define RT3593 0x3593
#define RT3883 0x3883 /* WSOC */
#define RT5390 0x5390 /* 2.4GHz */
+#define RT5392 0x5392 /* 2.4GHz */
u16 rf;
u16 rev;
@@ -355,6 +356,11 @@ struct link {
* Work structure for scheduling periodic AGC adjustments.
*/
struct delayed_work agc_work;
+
+ /*
+ * Work structure for scheduling periodic VCO calibration.
+ */
+ struct delayed_work vco_work;
};
enum rt2x00_delayed_flags {
@@ -579,6 +585,7 @@ struct rt2x00lib_ops {
void (*link_tuner) (struct rt2x00_dev *rt2x00dev,
struct link_qual *qual, const u32 count);
void (*gain_calibration) (struct rt2x00_dev *rt2x00dev);
+ void (*vco_calibration) (struct rt2x00_dev *rt2x00dev);
/*
* Data queue handlers.
@@ -647,6 +654,7 @@ struct rt2x00lib_ops {
*/
struct rt2x00_ops {
const char *name;
+ const unsigned int drv_data_size;
const unsigned int max_sta_intf;
const unsigned int max_ap_intf;
const unsigned int eeprom_size;
@@ -684,6 +692,12 @@ enum rt2x00_state_flags {
*/
CONFIG_CHANNEL_HT40,
CONFIG_POWERSAVING,
+
+ /*
+	 * Mark that we are currently reading the TX_STA_FIFO register sequentially.
+	 * FIXME: this is only for rt2800usb and should move to its private data.
+ */
+ TX_STATUS_READING,
};
/*
@@ -721,6 +735,7 @@ enum rt2x00_capability_flags {
CAPABILITY_EXTERNAL_LNA_BG,
CAPABILITY_DOUBLE_ANTENNA,
CAPABILITY_BT_COEXIST,
+ CAPABILITY_VCO_RECALIBRATION,
};
/*
@@ -742,6 +757,11 @@ struct rt2x00_dev {
const struct rt2x00_ops *ops;
/*
+ * Driver data.
+ */
+ void *drv_data;
+
+ /*
* IEEE80211 control structure.
*/
struct ieee80211_hw *hw;
@@ -886,18 +906,11 @@ struct rt2x00_dev {
u8 rssi_offset;
/*
- * Frequency offset (for rt61pci & rt73usb).
+ * Frequency offset.
*/
u8 freq_offset;
/*
- * Calibration information (for rt2800usb & rt2800pci).
- * [0] -> BW20
- * [1] -> BW40
- */
- u8 calibration[2];
-
- /*
* Association id.
*/
u16 aid;
@@ -967,7 +980,7 @@ struct rt2x00_dev {
/*
* Timer to ensure tx status reports are read (rt2800usb).
*/
- struct timer_list txstatus_timer;
+ struct hrtimer txstatus_timer;
/*
* Tasklet for processing tx status reports (rt2800pci).
@@ -979,6 +992,11 @@ struct rt2x00_dev {
struct tasklet_struct autowake_tasklet;
/*
+ * Used for VCO periodic calibration.
+ */
+ int rf_channel;
+
+ /*
* Protect the interrupt mask register.
*/
spinlock_t irqmask_lock;
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index b704e5b183d0..293676bfa571 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -102,7 +102,7 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
/* Update the AID, this is needed for dynamic PS support */
rt2x00dev->aid = bss_conf->assoc ? bss_conf->aid : 0;
- rt2x00dev->last_beacon = bss_conf->timestamp;
+ rt2x00dev->last_beacon = bss_conf->last_tsf;
/* Update global beacon interval time, this is needed for PS support */
rt2x00dev->beacon_int = bss_conf->beacon_int;
@@ -232,6 +232,9 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
memcpy(&libconf.channel,
&rt2x00dev->spec.channels_info[hw_value],
sizeof(libconf.channel));
+
+ /* Used for VCO periodic calibration */
+ rt2x00dev->rf_channel = libconf.rf.channel;
}
if (test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) &&
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index fd356b7c0476..fc9901e027c1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -88,6 +88,8 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2x00queue_start_queues(rt2x00dev);
rt2x00link_start_tuner(rt2x00dev);
rt2x00link_start_agc(rt2x00dev);
+ if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+ rt2x00link_start_vcocal(rt2x00dev);
/*
* Start watchdog monitoring.
@@ -111,6 +113,8 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
* Stop all queues
*/
rt2x00link_stop_agc(rt2x00dev);
+ if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+ rt2x00link_stop_vcocal(rt2x00dev);
rt2x00link_stop_tuner(rt2x00dev);
rt2x00queue_stop_queues(rt2x00dev);
rt2x00queue_flush_queues(rt2x00dev, true);
@@ -1125,6 +1129,18 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
{
int retval = -ENOMEM;
+ /*
+ * Allocate the driver data memory, if necessary.
+ */
+ if (rt2x00dev->ops->drv_data_size > 0) {
+ rt2x00dev->drv_data = kzalloc(rt2x00dev->ops->drv_data_size,
+ GFP_KERNEL);
+ if (!rt2x00dev->drv_data) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+ }
+
spin_lock_init(&rt2x00dev->irqmask_lock);
mutex_init(&rt2x00dev->csr_mutex);
@@ -1220,7 +1236,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
cancel_work_sync(&rt2x00dev->sleep_work);
if (rt2x00_is_usb(rt2x00dev)) {
- del_timer_sync(&rt2x00dev->txstatus_timer);
+ hrtimer_cancel(&rt2x00dev->txstatus_timer);
cancel_work_sync(&rt2x00dev->rxdone_work);
cancel_work_sync(&rt2x00dev->txdone_work);
}
@@ -1266,6 +1282,12 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
* Free queue structures.
*/
rt2x00queue_free(rt2x00dev);
+
+ /*
+ * Free the driver data.
+ */
+ if (rt2x00dev->drv_data)
+ kfree(rt2x00dev->drv_data);
}
EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev);
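The drv_data_size/drv_data hunks above let each chipset driver declare how much per-device private data it needs while the core allocates, zeroes and releases it. A rough userspace sketch of the same pattern, with calloc()/free() in place of kzalloc()/kfree(); the demo_* names are made up for the example, not rt2x00 API:

#include <stdio.h>
#include <stdlib.h>

struct demo_ops {
	const char *name;
	size_t drv_data_size;
};

struct demo_dev {
	const struct demo_ops *ops;
	void *drv_data;
};

struct demo_drv_data {
	unsigned int tbtt_tick;
};

static int demo_probe(struct demo_dev *dev, const struct demo_ops *ops)
{
	dev->ops = ops;
	dev->drv_data = NULL;

	if (ops->drv_data_size) {
		dev->drv_data = calloc(1, ops->drv_data_size);
		if (!dev->drv_data)
			return -1;
	}
	return 0;
}

static void demo_remove(struct demo_dev *dev)
{
	free(dev->drv_data);	/* free(NULL) is a no-op, like kfree() */
	dev->drv_data = NULL;
}

int main(void)
{
	static const struct demo_ops ops = {
		.name = "demo",
		.drv_data_size = sizeof(struct demo_drv_data),
	};
	struct demo_dev dev;

	if (demo_probe(&dev, &ops))
		return 1;

	((struct demo_drv_data *)dev.drv_data)->tbtt_tick = 0;
	printf("%s: allocated %zu bytes of driver data\n",
	       ops.name, ops.drv_data_size);

	demo_remove(&dev);
	return 0;
}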
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 4cdf247a870d..78bd43b8961f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -33,6 +33,7 @@
#define WATCHDOG_INTERVAL round_jiffies_relative(HZ)
#define LINK_TUNE_INTERVAL round_jiffies_relative(HZ)
#define AGC_INTERVAL round_jiffies_relative(4 * HZ)
+#define VCO_INTERVAL round_jiffies_relative(10 * HZ) /* 10 sec */
/*
* rt2x00_rate: Per rate device information
@@ -278,12 +279,24 @@ void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev);
void rt2x00link_start_agc(struct rt2x00_dev *rt2x00dev);
/**
+ * rt2x00link_start_vcocal - Start periodic VCO calibration
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ */
+void rt2x00link_start_vcocal(struct rt2x00_dev *rt2x00dev);
+
+/**
* rt2x00link_stop_agc - Stop periodic gain calibration
* @rt2x00dev: Pointer to &struct rt2x00_dev.
*/
void rt2x00link_stop_agc(struct rt2x00_dev *rt2x00dev);
/**
+ * rt2x00link_stop_vcocal - Stop periodic VCO calibration
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ */
+void rt2x00link_stop_vcocal(struct rt2x00_dev *rt2x00dev);
+
+/**
* rt2x00link_register - Initialize link tuning & watchdog functionality
* @rt2x00dev: Pointer to &struct rt2x00_dev.
*
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index ea10b0068f82..8368aab86f28 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -447,11 +447,27 @@ void rt2x00link_start_agc(struct rt2x00_dev *rt2x00dev)
AGC_INTERVAL);
}
+void rt2x00link_start_vcocal(struct rt2x00_dev *rt2x00dev)
+{
+ struct link *link = &rt2x00dev->link;
+
+ if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
+ rt2x00dev->ops->lib->vco_calibration)
+ ieee80211_queue_delayed_work(rt2x00dev->hw,
+ &link->vco_work,
+ VCO_INTERVAL);
+}
+
void rt2x00link_stop_agc(struct rt2x00_dev *rt2x00dev)
{
cancel_delayed_work_sync(&rt2x00dev->link.agc_work);
}
+void rt2x00link_stop_vcocal(struct rt2x00_dev *rt2x00dev)
+{
+ cancel_delayed_work_sync(&rt2x00dev->link.vco_work);
+}
+
static void rt2x00link_agc(struct work_struct *work)
{
struct rt2x00_dev *rt2x00dev =
@@ -473,9 +489,32 @@ static void rt2x00link_agc(struct work_struct *work)
AGC_INTERVAL);
}
+static void rt2x00link_vcocal(struct work_struct *work)
+{
+ struct rt2x00_dev *rt2x00dev =
+ container_of(work, struct rt2x00_dev, link.vco_work.work);
+ struct link *link = &rt2x00dev->link;
+
+ /*
+ * When the radio is shutting down we should
+ * immediately cease the VCO calibration.
+ */
+ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ return;
+
+ rt2x00dev->ops->lib->vco_calibration(rt2x00dev);
+
+ if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+ ieee80211_queue_delayed_work(rt2x00dev->hw,
+ &link->vco_work,
+ VCO_INTERVAL);
+}
+
void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
{
INIT_DELAYED_WORK(&rt2x00dev->link.agc_work, rt2x00link_agc);
+ if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+ INIT_DELAYED_WORK(&rt2x00dev->link.vco_work, rt2x00link_vcocal);
INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog);
INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner);
}
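rt2x00link_vcocal() above follows the usual self-rescheduling delayed-work shape: bail out if the radio is going down, run one calibration pass, and requeue only while the device is still present. A toy sketch of that control flow, with plain flags and a loop standing in for the device state bits and ieee80211_queue_delayed_work() (illustrative only):

#include <stdbool.h>
#include <stdio.h>

static bool radio_enabled = true;
static bool device_present = true;
static int calibration_passes;

static void vco_calibration(void)
{
	calibration_passes++;
}

/* one work invocation; returns true if it should be queued again */
static bool vcocal_work(void)
{
	if (!radio_enabled)
		return false;		/* radio shutting down: stop now */

	vco_calibration();

	return device_present;		/* requeue after VCO_INTERVAL */
}

int main(void)
{
	int tick;

	for (tick = 0; tick < 5; tick++) {
		if (tick == 3)
			radio_enabled = false;	/* simulate radio stop */
		if (!vcocal_work())
			break;
	}
	printf("ran %d calibration passes\n", calibration_passes);
	return 0;
}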
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 349008d1fb28..5f1392c72673 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -636,18 +636,6 @@ static inline int rt2x00queue_threshold(struct data_queue *queue)
{
return rt2x00queue_available(queue) < queue->threshold;
}
-
-/**
- * rt2x00queue_status_timeout - Check if a timeout occurred for STATUS reports
- * @entry: Queue entry to check.
- */
-static inline int rt2x00queue_status_timeout(struct queue_entry *entry)
-{
- if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
- return false;
- return time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
-}
-
/**
* rt2x00queue_dma_timeout - Check if a timeout occurred for DMA transfers
* @entry: Queue entry to check.
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 2eea3866504d..66094eb21b61 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -526,22 +526,6 @@ static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
rt2x00queue_flush_queue(queue, true);
}
-static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
-{
- WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
- " invoke forced tx handler\n", queue->qid);
-
- queue_work(queue->rt2x00dev->workqueue, &queue->rt2x00dev->txdone_work);
-}
-
-static int rt2x00usb_status_timeout(struct data_queue *queue)
-{
- struct queue_entry *entry;
-
- entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- return rt2x00queue_status_timeout(entry);
-}
-
static int rt2x00usb_dma_timeout(struct data_queue *queue)
{
struct queue_entry *entry;
@@ -558,8 +542,6 @@ void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
if (!rt2x00queue_empty(queue)) {
if (rt2x00usb_dma_timeout(queue))
rt2x00usb_watchdog_tx_dma(queue);
- if (rt2x00usb_status_timeout(queue))
- rt2x00usb_watchdog_tx_status(queue);
}
}
}
@@ -829,7 +811,8 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
INIT_WORK(&rt2x00dev->rxdone_work, rt2x00usb_work_rxdone);
INIT_WORK(&rt2x00dev->txdone_work, rt2x00usb_work_txdone);
- init_timer(&rt2x00dev->txstatus_timer);
+ hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
retval = rt2x00usb_alloc_reg(rt2x00dev);
if (retval)
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 638fbef693e6..cf53ac9d6f23 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -8,7 +8,7 @@
* Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
*
* The driver was extended to the RTL8187B in 2008 by:
- * Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+ * Herton Ronaldo Krzesinski <herton@mandriva.com.br>
* Hin-Tak Leung <htl10@users.sourceforge.net>
* Larry Finger <Larry.Finger@lwfinger.net>
*
@@ -232,6 +232,7 @@ static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct rtl8187_priv *priv = dev->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *tx_hdr = (struct ieee80211_hdr *)(skb->data);
unsigned int ep;
void *buf;
struct urb *urb;
@@ -249,7 +250,7 @@ static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
flags |= RTL818X_TX_DESC_FLAG_NO_ENC;
flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24;
- if (ieee80211_has_morefrags(((struct ieee80211_hdr *)skb->data)->frame_control))
+ if (ieee80211_has_morefrags(tx_hdr->frame_control))
flags |= RTL818X_TX_DESC_FLAG_MOREFRAG;
if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
flags |= RTL818X_TX_DESC_FLAG_RTS;
@@ -261,6 +262,13 @@ static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
}
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+ priv->seqno += 0x10;
+ tx_hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ tx_hdr->seq_ctrl |= cpu_to_le16(priv->seqno);
+ }
+
if (!priv->is_rtl8187b) {
struct rtl8187_tx_hdr *hdr =
(struct rtl8187_tx_hdr *)skb_push(skb, sizeof(*hdr));
@@ -274,8 +282,6 @@ static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
} else {
/* fc needs to be calculated before skb_push() */
unsigned int epmap[4] = { 6, 7, 5, 4 };
- struct ieee80211_hdr *tx_hdr =
- (struct ieee80211_hdr *)(skb->data);
u16 fc = le16_to_cpu(tx_hdr->frame_control);
struct rtl8187b_tx_hdr *hdr =
@@ -1031,10 +1037,61 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
cancel_delayed_work_sync(&priv->work);
}
+static u64 rtl8187_get_tsf(struct ieee80211_hw *dev, struct ieee80211_vif *vif)
+{
+ struct rtl8187_priv *priv = dev->priv;
+
+ return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
+ (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
+}
+
+
+static void rtl8187_beacon_work(struct work_struct *work)
+{
+ struct rtl8187_vif *vif_priv =
+ container_of(work, struct rtl8187_vif, beacon_work.work);
+ struct ieee80211_vif *vif =
+ container_of((void *)vif_priv, struct ieee80211_vif, drv_priv);
+ struct ieee80211_hw *dev = vif_priv->dev;
+ struct ieee80211_mgmt *mgmt;
+ struct sk_buff *skb;
+
+ /* don't overflow the tx ring */
+ if (ieee80211_queue_stopped(dev, 0))
+ goto resched;
+
+ /* grab a fresh beacon */
+ skb = ieee80211_beacon_get(dev, vif);
+ if (!skb)
+ goto resched;
+
+ /*
+ * update beacon timestamp w/ TSF value
+ * TODO: make hardware update beacon timestamp
+ */
+ mgmt = (struct ieee80211_mgmt *)skb->data;
+ mgmt->u.beacon.timestamp = cpu_to_le64(rtl8187_get_tsf(dev, vif));
+
+ /* TODO: use actual beacon queue */
+ skb_set_queue_mapping(skb, 0);
+
+ rtl8187_tx(dev, skb);
+
+resched:
+ /*
+ * schedule next beacon
+ * TODO: use hardware support for beacon timing
+ */
+ schedule_delayed_work(&vif_priv->beacon_work,
+ usecs_to_jiffies(1024 * vif->bss_conf.beacon_int));
+}
+
+
static int rtl8187_add_interface(struct ieee80211_hw *dev,
struct ieee80211_vif *vif)
{
struct rtl8187_priv *priv = dev->priv;
+ struct rtl8187_vif *vif_priv;
int i;
int ret = -EOPNOTSUPP;
@@ -1044,6 +1101,7 @@ static int rtl8187_add_interface(struct ieee80211_hw *dev,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
break;
default:
goto exit;
@@ -1052,6 +1110,13 @@ static int rtl8187_add_interface(struct ieee80211_hw *dev,
ret = 0;
priv->vif = vif;
+ /* Initialize driver private area */
+ vif_priv = (struct rtl8187_vif *)&vif->drv_priv;
+ vif_priv->dev = dev;
+ INIT_DELAYED_WORK(&vif_priv->beacon_work, rtl8187_beacon_work);
+ vif_priv->enable_beacon = false;
+
+
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
for (i = 0; i < ETH_ALEN; i++)
rtl818x_iowrite8(priv, &priv->map->MAC[i],
@@ -1175,9 +1240,12 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
u32 changed)
{
struct rtl8187_priv *priv = dev->priv;
+ struct rtl8187_vif *vif_priv;
int i;
u8 reg;
+ vif_priv = (struct rtl8187_vif *)&vif->drv_priv;
+
if (changed & BSS_CHANGED_BSSID) {
mutex_lock(&priv->conf_mutex);
for (i = 0; i < ETH_ALEN; i++)
@@ -1189,8 +1257,12 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
else
reg = 0;
- if (is_valid_ether_addr(info->bssid))
- reg |= RTL818X_MSR_INFRA;
+ if (is_valid_ether_addr(info->bssid)) {
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ reg |= RTL818X_MSR_ADHOC;
+ else
+ reg |= RTL818X_MSR_INFRA;
+ }
else
reg |= RTL818X_MSR_NO_LINK;
@@ -1202,6 +1274,16 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
if (changed & (BSS_CHANGED_ERP_SLOT | BSS_CHANGED_ERP_PREAMBLE))
rtl8187_conf_erp(priv, info->use_short_slot,
info->use_short_preamble);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ vif_priv->enable_beacon = info->enable_beacon;
+
+ if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON)) {
+ cancel_delayed_work_sync(&vif_priv->beacon_work);
+ if (vif_priv->enable_beacon)
+ schedule_work(&vif_priv->beacon_work.work);
+ }
+
}
static u64 rtl8187_prepare_multicast(struct ieee80211_hw *dev,
@@ -1279,13 +1361,6 @@ static int rtl8187_conf_tx(struct ieee80211_hw *dev,
return 0;
}
-static u64 rtl8187_get_tsf(struct ieee80211_hw *dev, struct ieee80211_vif *vif)
-{
- struct rtl8187_priv *priv = dev->priv;
-
- return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
- (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
-}
static const struct ieee80211_ops rtl8187_ops = {
.tx = rtl8187_tx,
@@ -1514,12 +1589,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
if (reg & 0xFF00)
priv->rfkill_mask = RFKILL_MASK_8198;
}
-
- /*
- * XXX: Once this driver supports anything that requires
- * beacons it must implement IEEE80211_TX_CTL_ASSIGN_SEQ.
- */
- dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ dev->vif_data_size = sizeof(struct rtl8187_vif);
+ dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+				      BIT(NL80211_IFTYPE_ADHOC);
if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b)
printk(KERN_INFO "rtl8187: inconsistency between id with OEM"
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
index f1cc90751dbf..e19a20a8e955 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
@@ -89,6 +89,14 @@ enum {
DEVICE_RTL8187B
};
+struct rtl8187_vif {
+ struct ieee80211_hw *dev;
+
+ /* beaconing */
+ struct delayed_work beacon_work;
+ bool enable_beacon;
+};
+
struct rtl8187_priv {
/* common between rtl818x drivers */
struct rtl818x_csr *map;
@@ -141,6 +149,7 @@ struct rtl8187_priv {
__le32 bits32;
} *io_dmabuf;
bool rfkill_off;
+ u16 seqno;
};
void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
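The IBSS/beacon support added to rtl8187 assigns sequence numbers in software: bits 0-3 of seq_ctrl carry the fragment number and bits 4-15 the sequence number, so adding 0x10 to the new seqno counter advances the sequence number by one while masking with IEEE80211_SCTL_FRAG keeps the fragment bits. A standalone sketch of that packing (byte-order handling via cpu_to_le16() is omitted; the SCTL_* constants mirror the mac80211 definitions):

#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG 0x000F	/* fragment number, bits 0-3  */
#define SCTL_SEQ  0xFFF0	/* sequence number, bits 4-15 */

/* assign the next sequence number while preserving the fragment bits,
 * mirroring the "priv->seqno += 0x10" bookkeeping in rtl8187_tx() */
static uint16_t assign_seq(uint16_t seq_ctrl, uint16_t *seqno,
			   int first_fragment)
{
	if (first_fragment)
		*seqno += 0x10;		/* +1 in the sequence-number field */

	seq_ctrl &= SCTL_FRAG;		/* keep only the fragment bits */
	seq_ctrl |= *seqno;		/* splice in the new sequence  */
	return seq_ctrl;
}

int main(void)
{
	uint16_t seqno = 0;
	uint16_t hdr_seq = 0;
	int frame;

	for (frame = 0; frame < 3; frame++) {
		hdr_seq = assign_seq(hdr_seq, &seqno, 1);
		printf("frame %d: seq_ctrl=0x%04x (seq=%u frag=%u)\n",
		       frame, (unsigned)hdr_seq,
		       (unsigned)(hdr_seq >> 4),
		       (unsigned)(hdr_seq & SCTL_FRAG));
	}
	return 0;
}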
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index d6c42e69bdbd..cefac6a43601 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -49,6 +49,11 @@ config RTLWIFI
depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE
default m
+config RTLWIFI_DEBUG
+ bool "Additional debugging output"
+ depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE
+ default y
+
config RTL8192C_COMMON
tristate
depends on RTL8192CE || RTL8192CU
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 8d6eb0f56c03..510023554e5f 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,10 +27,6 @@
*
*****************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/ip.h>
-#include <linux/module.h>
#include "wifi.h"
#include "rc.h"
#include "base.h"
@@ -39,11 +35,14 @@
#include "ps.h"
#include "regd.h"
+#include <linux/ip.h>
+#include <linux/module.h>
+
/*
- *NOTICE!!!: This file will be very big, we hsould
- *keep it clear under follwing roles:
+ *NOTICE!!!: This file will be very big, we should
+ *keep it clear under the following rules:
*
- *This file include follwing part, so, if you add new
+ *This file includes the following parts, so, if you add new
*functions into this file, please check which part it
*should includes. or check if you should add new part
*for this file:
@@ -211,7 +210,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
*/
if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_2T2R) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("1T2R or 2T2R\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "1T2R or 2T2R\n");
ht_cap->mcs.rx_mask[0] = 0xFF;
ht_cap->mcs.rx_mask[1] = 0xFF;
@@ -220,7 +219,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
} else if (get_rf_type(rtlphy) == RF_1T1R) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("1T1R\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "1T1R\n");
ht_cap->mcs.rx_mask[0] = 0xFF;
ht_cap->mcs.rx_mask[1] = 0x00;
@@ -302,15 +301,13 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
/* <4> set mac->sband to wiphy->sband */
hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- ("Err BAND %d\n",
- rtlhal->current_bandtype));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Err BAND %d\n",
+ rtlhal->current_bandtype);
}
}
/* <5> set hw caps */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_BEACON_FILTER |
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_CONNECTION_MONITOR |
/* IEEE80211_HW_SUPPORTS_CQM_RSSI | */
@@ -413,6 +410,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw)
wiphy_rfkill_start_polling(hw->wiphy);
}
+EXPORT_SYMBOL(rtl_init_rfkill);
void rtl_deinit_rfkill(struct ieee80211_hw *hw)
{
@@ -436,13 +434,13 @@ int rtl_init_core(struct ieee80211_hw *hw)
* mac80211 hw in _rtl_init_mac80211.
*/
if (rtl_regd_init(hw, rtl_reg_notifier)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("REGD init failed\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "REGD init failed\n");
return 1;
} else {
/* CRDA regd hint must after init CRDA */
if (regulatory_hint(hw->wiphy, rtlpriv->regd.alpha2)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("regulatory_hint fail\n"));
+ "regulatory_hint fail\n");
}
}
@@ -922,17 +920,17 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
return false;
RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- ("%s ACT_ADDBAREQ From :%pM\n",
- is_tx ? "Tx" : "Rx", hdr->addr2));
+ "%s ACT_ADDBAREQ From :%pM\n",
+ is_tx ? "Tx" : "Rx", hdr->addr2);
break;
case ACT_ADDBARSP:
RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- ("%s ACT_ADDBARSP From :%pM\n",
- is_tx ? "Tx" : "Rx", hdr->addr2));
+ "%s ACT_ADDBARSP From :%pM\n",
+ is_tx ? "Tx" : "Rx", hdr->addr2);
break;
case ACT_DELBA:
RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- ("ACT_ADDBADEL From :%pM\n", hdr->addr2));
+ "ACT_ADDBADEL From :%pM\n", hdr->addr2);
break;
}
break;
@@ -975,8 +973,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
* 67 : UDP BOOTP server
*/
RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV),
- DBG_DMESG, ("dhcp %s !!\n",
- (is_tx) ? "Tx" : "Rx"));
+ DBG_DMESG, "dhcp %s !!\n",
+ is_tx ? "Tx" : "Rx");
if (is_tx) {
rtl_lps_leave(hw);
@@ -996,7 +994,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
return true;
} else if (ETH_P_PAE == ether_type) {
RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- ("802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx"));
+ "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");
if (is_tx) {
rtl_lps_leave(hw);
@@ -1036,9 +1034,8 @@ int rtl_tx_agg_start(struct ieee80211_hw *hw,
return -ENXIO;
tid_data = &sta_entry->tids[tid];
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- ("on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
- tid_data->seq_number));
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "on ra = %pM tid = %d seq:%d\n",
+ sta->addr, tid, tid_data->seq_number);
*ssn = tid_data->seq_number;
tid_data->agg.agg_state = RTL_AGG_START;
@@ -1059,12 +1056,12 @@ int rtl_tx_agg_stop(struct ieee80211_hw *hw,
return -EINVAL;
if (!sta->addr) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("ra = NULL\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "ra = NULL\n");
return -EINVAL;
}
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- ("on ra = %pM tid = %d\n", sta->addr, tid));
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "on ra = %pM tid = %d\n",
+ sta->addr, tid);
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
@@ -1087,12 +1084,12 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw,
return -EINVAL;
if (!sta->addr) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("ra = NULL\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "ra = NULL\n");
return -EINVAL;
}
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- ("on ra = %pM tid = %d\n", sta->addr, tid));
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "on ra = %pM tid = %d\n",
+ sta->addr, tid);
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
@@ -1474,29 +1471,29 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
(memcmp(mac->bssid, ap5_6, 3) == 0) ||
vendor == PEER_ATH) {
vendor = PEER_ATH;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("=>ath find\n"));
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ath find\n");
} else if ((memcmp(mac->bssid, ap4_4, 3) == 0) ||
(memcmp(mac->bssid, ap4_5, 3) == 0) ||
(memcmp(mac->bssid, ap4_1, 3) == 0) ||
(memcmp(mac->bssid, ap4_2, 3) == 0) ||
(memcmp(mac->bssid, ap4_3, 3) == 0) ||
vendor == PEER_RAL) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("=>ral findn\n"));
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ral find\n");
vendor = PEER_RAL;
} else if (memcmp(mac->bssid, ap6_1, 3) == 0 ||
vendor == PEER_CISCO) {
vendor = PEER_CISCO;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("=>cisco find\n"));
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>cisco find\n");
} else if ((memcmp(mac->bssid, ap3_1, 3) == 0) ||
(memcmp(mac->bssid, ap3_2, 3) == 0) ||
(memcmp(mac->bssid, ap3_3, 3) == 0) ||
vendor == PEER_BROAD) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("=>broad find\n"));
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>broad find\n");
vendor = PEER_BROAD;
} else if (memcmp(mac->bssid, ap7_1, 3) == 0 ||
vendor == PEER_MARV) {
vendor = PEER_MARV;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("=>marv find\n"));
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>marv find\n");
}
mac->vendor = vendor;
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index f66b5757f6b9..5a23a6d0f49d 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index dc36d7461caa..5c7d57947d23 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,8 +27,6 @@
*
*****************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/export.h>
#include "wifi.h"
#include "cam.h"
@@ -55,10 +53,10 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
u8 entry_i;
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("key_cont_128:\n %x:%x:%x:%x:%x:%x\n",
- key_cont_128[0], key_cont_128[1],
- key_cont_128[2], key_cont_128[3],
- key_cont_128[4], key_cont_128[5]));
+ "key_cont_128:\n %x:%x:%x:%x:%x:%x\n",
+ key_cont_128[0], key_cont_128[1],
+ key_cont_128[2], key_cont_128[3],
+ key_cont_128[4], key_cont_128[5]);
for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
target_command = entry_i + CAM_CONTENT_COUNT * entry_no;
@@ -73,14 +71,12 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
target_command);
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "WRITE %x: %x\n",
+ rtlpriv->cfg->maps[WCAMI], target_content);
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("WRITE %x: %x\n",
- rtlpriv->cfg->maps[WCAMI], target_content));
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("The Key ID is %d\n", entry_no));
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("WRITE %x: %x\n",
- rtlpriv->cfg->maps[RWCAM], target_command));
+ "The Key ID is %d\n", entry_no);
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "WRITE %x: %x\n",
+ rtlpriv->cfg->maps[RWCAM], target_command);
} else if (entry_i == 1) {
@@ -94,10 +90,10 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
target_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("WRITE A4: %x\n", target_content));
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("WRITE A0: %x\n", target_command));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "WRITE A4: %x\n",
+ target_content);
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "WRITE A0: %x\n",
+ target_command);
} else {
@@ -114,15 +110,15 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
target_command);
udelay(100);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("WRITE A4: %x\n", target_content));
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("WRITE A0: %x\n", target_command));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "WRITE A4: %x\n",
+ target_content);
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "WRITE A0: %x\n",
+ target_command);
}
}
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("after set key, usconfig:%x\n", us_config));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "after set key, usconfig:%x\n",
+ us_config);
}
u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
@@ -133,14 +129,13 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
struct rtl_priv *rtlpriv = rtl_priv(hw);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, "
- "ulUseDK=%x MacAddr %pM\n",
- ul_entry_idx, ul_key_id, ul_enc_alg,
- ul_default_key, mac_addr));
+ "EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n",
+ ul_entry_idx, ul_key_id, ul_enc_alg,
+ ul_default_key, mac_addr);
if (ul_key_id == TOTAL_CAM_ENTRY) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("<=== ulKeyId exceed!\n"));
+ "<=== ulKeyId exceed!\n");
return 0;
}
@@ -153,7 +148,7 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
(u8 *) key_content, us_config);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("<===\n"));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "<===\n");
return 1;
@@ -166,7 +161,7 @@ int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
u32 ul_command;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("key_idx:%d\n", ul_key_id));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "key_idx:%d\n", ul_key_id);
ul_command = ul_key_id * CAM_CONTENT_COUNT;
ul_command = ul_command | BIT(31) | BIT(16);
@@ -175,9 +170,9 @@ int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("rtl_cam_delete_one_entry(): WRITE A4: %x\n", 0));
+ "rtl_cam_delete_one_entry(): WRITE A4: %x\n", 0);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("rtl_cam_delete_one_entry(): WRITE A0: %x\n", ul_command));
+ "rtl_cam_delete_one_entry(): WRITE A0: %x\n", ul_command);
return 0;
@@ -229,9 +224,9 @@ void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index)
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("rtl_cam_mark_invalid(): WRITE A4: %x\n", ul_content));
+ "rtl_cam_mark_invalid(): WRITE A4: %x\n", ul_content);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("rtl_cam_mark_invalid(): WRITE A0: %x\n", ul_command));
+ "rtl_cam_mark_invalid(): WRITE A0: %x\n", ul_command);
}
EXPORT_SYMBOL(rtl_cam_mark_invalid);
@@ -279,11 +274,11 @@ void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("rtl_cam_empty_entry(): WRITE A4: %x\n",
- ul_content));
+ "rtl_cam_empty_entry(): WRITE A4: %x\n",
+ ul_content);
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("rtl_cam_empty_entry(): WRITE A0: %x\n",
- ul_command));
+ "rtl_cam_empty_entry(): WRITE A0: %x\n",
+ ul_command);
}
}
@@ -297,8 +292,7 @@ u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr)
u8 i, *addr;
if (NULL == sta_addr) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
- ("sta_addr is NULL.\n"));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, "sta_addr is NULL\n");
return TOTAL_CAM_ENTRY;
}
/* Does STA already exist? */
@@ -311,8 +305,8 @@ u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr)
for (entry_idx = 4; entry_idx < TOTAL_CAM_ENTRY; entry_idx++) {
if ((bitmap & BIT(0)) == 0) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
- ("-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n",
- rtlpriv->sec.hwsec_cam_bitmap, entry_idx));
+ "-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n",
+ rtlpriv->sec.hwsec_cam_bitmap, entry_idx);
rtlpriv->sec.hwsec_cam_bitmap |= BIT(0) << entry_idx;
memcpy(rtlpriv->sec.hwsec_cam_sta_addr[entry_idx],
sta_addr, ETH_ALEN);
@@ -331,14 +325,13 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
u8 i, *addr;
if (NULL == sta_addr) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
- ("sta_addr is NULL.\n"));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, "sta_addr is NULL\n");
}
if ((sta_addr[0]|sta_addr[1]|sta_addr[2]|sta_addr[3]|\
sta_addr[4]|sta_addr[5]) == 0) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
- ("sta_addr is 00:00:00:00:00:00.\n"));
+ "sta_addr is 00:00:00:00:00:00\n");
return;
}
/* Does STA already exist? */
diff --git a/drivers/net/wireless/rtlwifi/cam.h b/drivers/net/wireless/rtlwifi/cam.h
index c62da4eefc75..35e00086a520 100644
--- a/drivers/net/wireless/rtlwifi/cam.h
+++ b/drivers/net/wireless/rtlwifi/cam.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 3f0f056fae9c..278e9f957e0d 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -31,8 +31,50 @@
#include "core.h"
#include "cam.h"
#include "base.h"
+#include "pci.h"
#include "ps.h"
+#include <linux/export.h>
+
+void rtl_fw_cb(const struct firmware *firmware, void *context)
+{
+ struct ieee80211_hw *hw = context;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int err;
+
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
+ "Firmware callback routine entered!\n");
+ complete(&rtlpriv->firmware_loading_complete);
+ if (!firmware) {
+ pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
+ rtlpriv->max_fw_size = 0;
+ return;
+ }
+ if (firmware->size > rtlpriv->max_fw_size) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Firmware is too big!\n");
+ release_firmware(firmware);
+ return;
+ }
+ memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
+ rtlpriv->rtlhal.fwsize = firmware->size;
+ release_firmware(firmware);
+
+ err = ieee80211_register_hw(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Can't register mac80211 hw\n");
+ return;
+ } else {
+ rtlpriv->mac80211.mac80211_registered = 1;
+ }
+ set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+
+ /*init rfkill */
+ rtl_init_rfkill(hw);
+}
+EXPORT_SYMBOL(rtl_fw_cb);
+
/*mutex for start & stop is must here. */
static int rtl_op_start(struct ieee80211_hw *hw)
{
@@ -112,9 +154,11 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
int err = 0;
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+
if (mac->vif) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("vif has been set!! mac->vif = 0x%p\n", mac->vif));
+ "vif has been set!! mac->vif = 0x%p\n", mac->vif);
return -EOPNOTSUPP;
}
@@ -125,7 +169,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_STATION:
if (mac->beacon_enabled == 1) {
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- ("NL80211_IFTYPE_STATION\n"));
+ "NL80211_IFTYPE_STATION\n");
mac->beacon_enabled = 0;
rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
rtlpriv->cfg->maps
@@ -134,7 +178,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_ADHOC:
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- ("NL80211_IFTYPE_ADHOC\n"));
+ "NL80211_IFTYPE_ADHOC\n");
mac->link_state = MAC80211_LINKED;
rtlpriv->cfg->ops->set_bcn_reg(hw);
@@ -148,7 +192,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_AP:
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- ("NL80211_IFTYPE_AP\n"));
+ "NL80211_IFTYPE_AP\n");
mac->link_state = MAC80211_LINKED;
rtlpriv->cfg->ops->set_bcn_reg(hw);
@@ -161,7 +205,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("operation mode %d is not support!\n", vif->type));
+ "operation mode %d is not supported!\n", vif->type);
err = -EOPNOTSUPP;
goto out;
}
@@ -221,7 +265,7 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&rtlpriv->locks.conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { /*BIT(2)*/
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- ("IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n"));
+ "IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n");
}
/*For IPS */
@@ -264,8 +308,8 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- ("IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
- hw->conf.long_frame_max_tx_count));
+ "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
+ hw->conf.long_frame_max_tx_count);
mac->retry_long = hw->conf.long_frame_max_tx_count;
mac->retry_short = hw->conf.long_frame_max_tx_count;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
@@ -320,7 +364,7 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
default:
mac->bw_40 = false;
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not processed\n"));
+ "switch case not processed\n");
break;
}
@@ -369,12 +413,12 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
rtlpriv->cfg->maps[MAC_RCR_AB];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- ("Enable receive multicast frame.\n"));
+ "Enable receive multicast frame\n");
} else {
mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
rtlpriv->cfg->maps[MAC_RCR_AB]);
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- ("Disable receive multicast frame.\n"));
+ "Disable receive multicast frame\n");
}
}
@@ -382,11 +426,11 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (*new_flags & FIF_FCSFAIL) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- ("Enable receive FCS error frame.\n"));
+ "Enable receive FCS error frame\n");
} else {
mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- ("Disable receive FCS error frame.\n"));
+ "Disable receive FCS error frame\n");
}
}
@@ -409,11 +453,11 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- ("Enable receive control frame.\n"));
+ "Enable receive control frame\n");
} else {
mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- ("Disable receive control frame.\n"));
+ "Disable receive control frame\n");
}
}
@@ -421,11 +465,11 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (*new_flags & FIF_OTHER_BSS) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- ("Enable receive other BSS's frame.\n"));
+ "Enable receive other BSS's frame\n");
} else {
mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- ("Disable receive other BSS's frame.\n"));
+ "Disable receive other BSS's frame\n");
}
}
}
@@ -456,7 +500,7 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw,
sta_entry->wireless_mode = WIRELESS_MODE_G;
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- ("Add sta addr is %pM\n", sta->addr));
+ "Add sta addr is %pM\n", sta->addr);
rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
}
return 0;
@@ -469,7 +513,7 @@ static int rtl_op_sta_remove(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry;
if (sta) {
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- ("Remove sta addr is %pM\n", sta->addr));
+ "Remove sta addr is %pM\n", sta->addr);
sta_entry = (struct rtl_sta_info *) sta->drv_priv;
sta_entry->wireless_mode = 0;
sta_entry->ratr_index = 0;
@@ -514,7 +558,7 @@ static int rtl_op_conf_tx(struct ieee80211_hw *hw,
if (queue >= AC_MAX) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("queue number %d is incorrect!\n", queue));
+ "queue number %d is incorrect!\n", queue);
return -EINVAL;
}
@@ -547,7 +591,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->enable_beacon)) {
if (mac->beacon_enabled == 0) {
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- ("BSS_CHANGED_BEACON_ENABLED\n"));
+ "BSS_CHANGED_BEACON_ENABLED\n");
/*start hw beacon interrupt. */
/*rtlpriv->cfg->ops->set_bcn_reg(hw); */
@@ -565,7 +609,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
!bss_conf->enable_beacon)) {
if (mac->beacon_enabled == 1) {
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- ("ADHOC DISABLE BEACON\n"));
+ "ADHOC DISABLE BEACON\n");
mac->beacon_enabled = 0;
rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
@@ -575,7 +619,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_BEACON_INT) {
RT_TRACE(rtlpriv, COMP_BEACON, DBG_TRACE,
- ("BSS_CHANGED_BEACON_INT\n"));
+ "BSS_CHANGED_BEACON_INT\n");
mac->beacon_interval = bss_conf->beacon_int;
rtlpriv->cfg->ops->set_bcn_intv(hw);
}
@@ -604,7 +648,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (mac->opmode == NL80211_IFTYPE_STATION && sta)
rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- ("BSS_CHANGED_ASSOC\n"));
+ "BSS_CHANGED_ASSOC\n");
} else {
if (mac->link_state == MAC80211_LINKED)
rtl_lps_leave(hw);
@@ -619,20 +663,20 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->vendor = PEER_UNKNOWN;
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- ("BSS_CHANGED_UN_ASSOC\n"));
+ "BSS_CHANGED_UN_ASSOC\n");
}
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- ("BSS_CHANGED_ERP_CTS_PROT\n"));
+ "BSS_CHANGED_ERP_CTS_PROT\n");
mac->use_cts_protect = bss_conf->use_cts_prot;
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- ("BSS_CHANGED_ERP_PREAMBLE use short preamble:%x\n",
- bss_conf->use_short_preamble));
+ "BSS_CHANGED_ERP_PREAMBLE use short preamble:%x\n",
+ bss_conf->use_short_preamble);
mac->short_preamble = bss_conf->use_short_preamble;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE,
@@ -641,7 +685,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ERP_SLOT) {
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- ("BSS_CHANGED_ERP_SLOT\n"));
+ "BSS_CHANGED_ERP_SLOT\n");
if (bss_conf->use_short_slot)
mac->slot_time = RTL_SLOT_TIME_9;
@@ -653,8 +697,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_HT) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- ("BSS_CHANGED_HT\n"));
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, "BSS_CHANGED_HT\n");
rcu_read_lock();
sta = get_sta(hw, vif, bss_conf->bssid);
if (sta) {
@@ -683,8 +726,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BSSID,
(u8 *) bss_conf->bssid);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- ("%pM\n", bss_conf->bssid));
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, "%pM\n",
+ bss_conf->bssid);
mac->vendor = PEER_UNKNOWN;
memcpy(mac->bssid, bss_conf->bssid, 6);
@@ -831,30 +874,30 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_TX_START:
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- ("IEEE80211_AMPDU_TX_START: TID:%d\n", tid));
+ "IEEE80211_AMPDU_TX_START: TID:%d\n", tid);
return rtl_tx_agg_start(hw, sta, tid, ssn);
break;
case IEEE80211_AMPDU_TX_STOP:
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- ("IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid));
+ "IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid);
return rtl_tx_agg_stop(hw, sta, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- ("IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid));
+ "IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid);
rtl_tx_agg_oper(hw, sta, tid);
break;
case IEEE80211_AMPDU_RX_START:
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- ("IEEE80211_AMPDU_RX_START:TID:%d\n", tid));
+ "IEEE80211_AMPDU_RX_START:TID:%d\n", tid);
break;
case IEEE80211_AMPDU_RX_STOP:
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- ("IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid));
+ "IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("IEEE80211_AMPDU_ERR!!!!:\n"));
+ "IEEE80211_AMPDU_ERR!!!!:\n");
return -EOPNOTSUPP;
}
return 0;
@@ -867,7 +910,7 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw)
mac->act_scanning = true;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("\n"));
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
if (mac->link_state == MAC80211_LINKED) {
rtl_lps_leave(hw);
@@ -888,7 +931,7 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("\n"));
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
mac->act_scanning = false;
/* Dual mac */
rtlpriv->rtlhal.load_imrandiqk_setting_for2g = false;
@@ -921,13 +964,13 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("not open hw encryption\n"));
+ "not open hw encryption\n");
return -ENOSPC; /*User disabled HW-crypto */
}
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("%s hardware based encryption for keyidx: %d, mac: %pM\n",
- cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
- sta ? sta->addr : bcast_addr));
+ "%s hardware based encryption for keyidx: %d, mac: %pM\n",
+ cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
+ sta ? sta->addr : bcast_addr);
rtlpriv->sec.being_setkey = true;
rtl_ips_nic_on(hw);
mutex_lock(&rtlpriv->locks.conf_mutex);
@@ -936,24 +979,23 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
key_type = WEP40_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:WEP40\n"));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP40\n");
break;
case WLAN_CIPHER_SUITE_WEP104:
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("alg:WEP104\n"));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP104\n");
key_type = WEP104_ENCRYPTION;
break;
case WLAN_CIPHER_SUITE_TKIP:
key_type = TKIP_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:TKIP\n"));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:TKIP\n");
break;
case WLAN_CIPHER_SUITE_CCMP:
key_type = AESCCMP_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:CCMP\n"));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CCMP\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("alg_err:%x!!!!:\n", key->cipher));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "alg_err:%x!!!!\n",
+ key->cipher);
goto out_unlock;
}
if (key_type == WEP40_ENCRYPTION ||
@@ -995,8 +1037,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
wep_only = true;
rtlpriv->sec.pairwise_enc_algorithm = key_type;
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("set enable_hw_sec, key_type:%x(OPEN:0 WEP40:1"
- " TKIP:2 AES:4 WEP104:5)\n", key_type));
+ "set enable_hw_sec, key_type:%x(OPEN:0 WEP40:1 TKIP:2 AES:4 WEP104:5)\n",
+ key_type);
rtlpriv->cfg->ops->enable_hw_sec(hw);
}
}
@@ -1005,7 +1047,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case SET_KEY:
if (wep_only) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("set WEP(group/pairwise) key\n"));
+ "set WEP(group/pairwise) key\n");
/* Pairwise key with an assigned MAC address. */
rtlpriv->sec.pairwise_enc_algorithm = key_type;
rtlpriv->sec.group_enc_algorithm = key_type;
@@ -1016,7 +1058,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
memcpy(mac_addr, zero_addr, ETH_ALEN);
} else if (group_key) { /* group key */
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("set group key\n"));
+ "set group key\n");
/* group key */
rtlpriv->sec.group_enc_algorithm = key_type;
/*set local buf about group key. */
@@ -1026,10 +1068,10 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
memcpy(mac_addr, bcast_addr, ETH_ALEN);
} else { /* pairwise key */
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("set pairwise key\n"));
+ "set pairwise key\n");
if (!sta) {
- RT_ASSERT(false, ("pairwise key withnot"
- "mac_addr\n"));
+ RT_ASSERT(false,
+ "pairwise key without mac_addr\n");
err = -EOPNOTSUPP;
goto out_unlock;
@@ -1056,7 +1098,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
case DISABLE_KEY:
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("disable key delete one entry\n"));
+ "disable key delete one entry\n");
/*set local buf about wep key. */
if (mac->opmode == NL80211_IFTYPE_AP) {
if (sta)
@@ -1077,7 +1119,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("cmd_err:%x!!!!:\n", cmd));
+ "cmd_err:%x!!!!\n", cmd);
}
out_unlock:
mutex_unlock(&rtlpriv->locks.conf_mutex);
@@ -1106,8 +1148,8 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
rtlpriv->rfkill.rfkill_state = radio_state;
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- (KERN_INFO "wireless radio switch turned %s\n",
- radio_state ? "on" : "off"));
+ "wireless radio switch turned %s\n",
+ radio_state ? "on" : "off");
blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
index f02824a3b747..2fe46a1b4f1f 100644
--- a/drivers/net/wireless/rtlwifi/core.h
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -30,8 +30,6 @@
#ifndef __RTL_CORE_H__
#define __RTL_CORE_H__
-#include <net/mac80211.h>
-
#define RTL_SUPPORTED_FILTERS \
(FIF_PROMISC_IN_BSS | \
FIF_ALLMULTI | FIF_CONTROL | \
@@ -42,4 +40,6 @@
#define RTL_SUPPORTED_CTRL_FILTER 0xFF
extern const struct ieee80211_ops rtl_ops;
+void rtl_fw_cb(const struct firmware *firmware, void *context);
+
#endif
diff --git a/drivers/net/wireless/rtlwifi/debug.c b/drivers/net/wireless/rtlwifi/debug.c
index 1b5cb7153a52..bdda9b2fffe1 100644
--- a/drivers/net/wireless/rtlwifi/debug.c
+++ b/drivers/net/wireless/rtlwifi/debug.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -28,6 +28,8 @@
#include "wifi.h"
+#include <linux/moduleparam.h>
+
void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/debug.h b/drivers/net/wireless/rtlwifi/debug.h
index 160dd0685213..07493d2957f2 100644
--- a/drivers/net/wireless/rtlwifi/debug.h
+++ b/drivers/net/wireless/rtlwifi/debug.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -156,53 +156,78 @@ enum dbgp_flag_e {
DBGP_TYPE_MAX
};
-#define RT_ASSERT(_exp, fmt) \
- do { \
- if (!(_exp)) { \
- printk(KERN_DEBUG "%s:%s(): ", KBUILD_MODNAME, \
- __func__); \
- printk fmt; \
- } \
- } while (0);
-
-#define RT_TRACE(rtlpriv, comp, level, fmt)\
- do { \
- if (unlikely(((comp) & rtlpriv->dbg.global_debugcomponents) && \
- ((level) <= rtlpriv->dbg.global_debuglevel))) {\
- printk(KERN_DEBUG "%s:%s():<%lx-%x> ", KBUILD_MODNAME, \
- __func__, in_interrupt(), in_atomic()); \
- printk fmt; \
- } \
- } while (0);
-
-#define RTPRINT(rtlpriv, dbgtype, dbgflag, printstr) \
- do { \
- if (unlikely(rtlpriv->dbg.dbgp_type[dbgtype] & dbgflag)) { \
- printk(KERN_DEBUG "%s: ", KBUILD_MODNAME); \
- printk printstr; \
- } \
- } while (0);
-
-#define RT_PRINT_DATA(rtlpriv, _comp, _level, _titlestring, _hexdata, \
- _hexdatalen) \
- do {\
- if (unlikely(((_comp) & rtlpriv->dbg.global_debugcomponents) &&\
- (_level <= rtlpriv->dbg.global_debuglevel))) { \
- int __i; \
- u8* ptr = (u8 *)_hexdata; \
- printk(KERN_DEBUG "%s: ", KBUILD_MODNAME); \
- printk("In process \"%s\" (pid %i):", current->comm,\
- current->pid); \
- printk(_titlestring); \
- for (__i = 0; __i < (int)_hexdatalen; __i++) { \
- printk("%02X%s", ptr[__i], (((__i + 1) % 4)\
- == 0) ? " " : " ");\
- if (((__i + 1) % 16) == 0) \
- printk("\n"); \
- } \
- printk(KERN_DEBUG "\n"); \
- } \
- } while (0);
+#ifdef CONFIG_RTLWIFI_DEBUG
+
+#define RT_ASSERT(_exp, fmt, ...) \
+do { \
+ if (!(_exp)) { \
+ printk(KERN_DEBUG KBUILD_MODNAME ":%s(): " fmt, \
+ __func__, ##__VA_ARGS__); \
+ } \
+} while (0)
+
+#define RT_TRACE(rtlpriv, comp, level, fmt, ...) \
+do { \
+ if (unlikely(((comp) & rtlpriv->dbg.global_debugcomponents) && \
+ ((level) <= rtlpriv->dbg.global_debuglevel))) { \
+ printk(KERN_DEBUG KBUILD_MODNAME ":%s():<%lx-%x> " fmt, \
+ __func__, in_interrupt(), in_atomic(), \
+ ##__VA_ARGS__); \
+ } \
+} while (0)
+
+#define RTPRINT(rtlpriv, dbgtype, dbgflag, fmt, ...) \
+do { \
+ if (unlikely(rtlpriv->dbg.dbgp_type[dbgtype] & dbgflag)) { \
+ printk(KERN_DEBUG KBUILD_MODNAME ": " fmt, \
+ ##__VA_ARGS__); \
+ } \
+} while (0)
+
+#define RT_PRINT_DATA(rtlpriv, _comp, _level, _titlestring, _hexdata, \
+ _hexdatalen) \
+do { \
+ if (unlikely(((_comp) & rtlpriv->dbg.global_debugcomponents) && \
+ (_level <= rtlpriv->dbg.global_debuglevel))) { \
+ printk(KERN_DEBUG "%s: In process \"%s\" (pid %i): %s\n", \
+ KBUILD_MODNAME, current->comm, current->pid, \
+ _titlestring); \
+ print_hex_dump_bytes("", DUMP_PREFIX_NONE, \
+ _hexdata, _hexdatalen); \
+ } \
+} while (0)
+
+#else
+
+struct rtl_priv;
+
+__printf(2, 3)
+static inline void RT_ASSERT(int exp, const char *fmt, ...)
+{
+}
+
+__printf(4, 5)
+static inline void RT_TRACE(struct rtl_priv *rtlpriv,
+ int comp, int level,
+ const char *fmt, ...)
+{
+}
+
+__printf(4, 5)
+static inline void RTPRINT(struct rtl_priv *rtlpriv,
+ int dbgtype, int dbgflag,
+ const char *fmt, ...)
+{
+}
+
+static inline void RT_PRINT_DATA(struct rtl_priv *rtlpriv,
+ int comp, int level,
+ const char *titlestring,
+ const void *hexdata, size_t hexdatalen)
+{
+}
+
+#endif
void rtl_dbgp_flag_init(struct ieee80211_hw *hw);
#endif
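The debug.h rework above turns RT_ASSERT, RT_TRACE and RTPRINT from macros that took a single parenthesized printf-style argument into ordinary variadic macros, which is why every call site in this series drops one set of parentheses; the ##__VA_ARGS__ extension swallows the trailing comma when no arguments follow the format string. A minimal userspace sketch of the same mechanics, with KBUILD_MODNAME and printk replaced by a fixed prefix and fprintf so it builds outside the kernel (every name below is a placeholder, not the driver's real API):

/* Stand-in for the new variadic RT_TRACE-style macro. */
#include <stdio.h>

#define DBG_TRACE 1
#define DBG_LOUD  2

static int global_debuglevel = DBG_LOUD;

#define rt_trace(level, fmt, ...)					\
do {									\
	if ((level) <= global_debuglevel)				\
		fprintf(stderr, "rtlwifi:%s(): " fmt,			\
			__func__, ##__VA_ARGS__);			\
} while (0)

int main(void)
{
	int tid = 3;

	/* old style was rt_trace(..., ("TID:%d\n", tid)) with extra parens */
	rt_trace(DBG_TRACE, "IEEE80211_AMPDU_TX_START: TID:%d\n", tid);
	rt_trace(DBG_LOUD, "\n");	/* zero-argument case still compiles */
	return 0;
}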
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index ed1058b71587..1f143800a8d7 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -162,8 +162,8 @@ void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value)
const u32 efuse_len =
rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- ("Addr=%x Data =%x\n", address, value));
+ RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "Addr=%x Data =%x\n",
+ address, value);
if (address < efuse_len) {
rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], value);
@@ -252,8 +252,8 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
if ((_offset + _size_byte) > rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]) {
RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- ("read_efuse(): Invalid offset(%#x) with read "
- "bytes(%#x)!!\n", _offset, _size_byte));
+ "read_efuse(): Invalid offset(%#x) with read bytes(%#x)!!\n",
+ _offset, _size_byte);
return;
}
@@ -280,7 +280,7 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
if (*rtemp8 != 0xFF) {
efuse_utilized++;
RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
- ("Addr=%d\n", efuse_addr));
+ "Addr=%d\n", efuse_addr);
efuse_addr++;
}
@@ -290,13 +290,13 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
if (offset < efuse_max_section) {
wren = (*rtemp8 & 0x0f);
RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
- ("offset-%d Worden=%x\n", offset, wren));
+ "offset-%d Worden=%x\n", offset, wren);
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
if (!(wren & 0x01)) {
RTPRINT(rtlpriv, FEEPROM,
- EFUSE_READ_ALL, ("Addr=%d\n",
- efuse_addr));
+ EFUSE_READ_ALL,
+ "Addr=%d\n", efuse_addr);
read_efuse_byte(hw, efuse_addr, rtemp8);
efuse_addr++;
@@ -308,8 +308,8 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
break;
RTPRINT(rtlpriv, FEEPROM,
- EFUSE_READ_ALL, ("Addr=%d\n",
- efuse_addr));
+ EFUSE_READ_ALL,
+ "Addr=%d\n", efuse_addr);
read_efuse_byte(hw, efuse_addr, rtemp8);
efuse_addr++;
@@ -326,7 +326,7 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
}
RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
- ("Addr=%d\n", efuse_addr));
+ "Addr=%d\n", efuse_addr);
read_efuse_byte(hw, efuse_addr, rtemp8);
if (*rtemp8 != 0xFF && (efuse_addr < efuse_len)) {
efuse_utilized++;
@@ -395,9 +395,8 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
result = false;
RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- ("efuse_shadow_update_chk(): totalbytes(%#x), "
- "hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
- totalbytes, hdr_num, words_need, efuse_used));
+ "efuse_shadow_update_chk(): totalbytes(%#x), hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
+ totalbytes, hdr_num, words_need, efuse_used);
return result;
}
@@ -434,7 +433,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
u8 word_en = 0x0F;
u8 first_pg = false;
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("--->\n"));
+ RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "--->\n");
if (!efuse_shadow_update_chk(hw)) {
efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
@@ -443,7 +442,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- ("<---efuse out of capacity!!\n"));
+ "<---efuse out of capacity!!\n");
return false;
}
efuse_power_switch(hw, true, true);
@@ -478,12 +477,12 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base],
8);
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD,
- ("U-efuse\n"), tmpdata, 8);
+ "U-efuse", tmpdata, 8);
if (!efuse_pg_packet_write(hw, (u8) offset, word_en,
tmpdata)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("PG section(%#x) fail!!\n", offset));
+ "PG section(%#x) fail!!\n", offset);
break;
}
}
@@ -497,7 +496,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("<---\n"));
+ RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "<---\n");
return true;
}
@@ -634,8 +633,8 @@ static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 tmpidx = 0;
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- ("Addr = %x Data=%x\n", addr, data));
+ RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "Addr = %x Data=%x\n",
+ addr, data);
rtl_write_byte(rtlpriv,
rtlpriv->cfg->maps[EFUSE_CTRL] + 1, (u8) (addr & 0xff));
@@ -778,7 +777,7 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
dataempty = false;
}
- if (dataempty == false) {
+ if (!dataempty) {
*efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
*write_state = PG_STATE_HEADER;
} else {
@@ -851,7 +850,7 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
}
}
}
- RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, ("efuse PG_STATE_HEADER-1\n"));
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, "efuse PG_STATE_HEADER-1\n");
}
static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
@@ -916,7 +915,7 @@ static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
}
RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
- ("efuse PG_STATE_HEADER-2\n"));
+ "efuse PG_STATE_HEADER-2\n");
}
}
@@ -936,7 +935,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
if (efuse_get_current_size(hw) >=
(EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES)) {
RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
- ("efuse_pg_packet_write error\n"));
+ "efuse_pg_packet_write error\n");
return false;
}
@@ -948,7 +947,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
efuse_word_enable_data_read(word_en, data, target_pkt.data);
target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en);
- RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, ("efuse Power ON\n"));
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, "efuse Power ON\n");
while (continual && (efuse_addr <
(EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES))) {
@@ -956,7 +955,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
if (write_state == PG_STATE_HEADER) {
badworden = 0x0F;
RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
- ("efuse PG_STATE_HEADER\n"));
+ "efuse PG_STATE_HEADER\n");
if (efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
(efuse_data != 0xFF))
@@ -976,7 +975,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
} else if (write_state == PG_STATE_DATA) {
RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
- ("efuse PG_STATE_DATA\n"));
+ "efuse PG_STATE_DATA\n");
badworden =
efuse_word_enable_data_write(hw, efuse_addr + 1,
target_pkt.word_en,
@@ -999,14 +998,14 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
result = false;
}
RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
- ("efuse PG_STATE_HEADER-3\n"));
+ "efuse PG_STATE_HEADER-3\n");
}
}
}
if (efuse_addr >= (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES)) {
RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- ("efuse_addr(%#x) Out of size!!\n", efuse_addr));
+ "efuse_addr(%#x) Out of size!!\n", efuse_addr);
}
return true;
@@ -1046,8 +1045,8 @@ static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
u8 tmpdata[8];
memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr));
+ RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "word_en = %x efuse_addr=%x\n",
+ word_en, efuse_addr);
if (!(word_en & BIT(0))) {
tmpaddr = start_addr;
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h
index 164dabaa7615..2bdea9a8699e 100644
--- a/drivers/net/wireless/rtlwifi/efuse.h
+++ b/drivers/net/wireless/rtlwifi/efuse.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 9245d882c06a..07dd38efe62a 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,13 +27,13 @@
*
*****************************************************************************/
-#include <linux/export.h>
-#include "core.h"
#include "wifi.h"
+#include "core.h"
#include "pci.h"
#include "base.h"
#include "ps.h"
#include "efuse.h"
+#include <linux/export.h>
static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
PCI_VENDOR_ID_INTEL,
@@ -170,7 +170,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
@@ -198,7 +198,7 @@ static bool _rtl_pci_platform_switch_device_pci_aspm(
}
/*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/
-static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
+static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -207,8 +207,6 @@ static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
udelay(100);
-
- return true;
}
/*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/
@@ -232,7 +230,7 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- ("PCI(Bridge) UNKNOWN.\n"));
+ "PCI(Bridge) UNKNOWN\n");
return;
}
@@ -286,7 +284,7 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- ("PCI(Bridge) UNKNOWN.\n"));
+ "PCI(Bridge) UNKNOWN\n");
return;
}
@@ -303,11 +301,10 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
u_pcibridge_aspmsetting);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("PlatformEnableASPM():PciBridge busnumber[%x], "
- "DevNumbe[%x], funcnumber[%x], Write reg[%x] = %x\n",
- pcibridge_busnum, pcibridge_devnum, pcibridge_funcnum,
- (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
- u_pcibridge_aspmsetting));
+ "PlatformEnableASPM():PciBridge busnumber[%x], DevNumbe[%x], funcnumber[%x], Write reg[%x] = %x\n",
+ pcibridge_busnum, pcibridge_devnum, pcibridge_funcnum,
+ (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
+ u_pcibridge_aspmsetting);
udelay(50);
@@ -382,9 +379,8 @@ static void rtl_pci_parse_configuration(struct pci_dev *pdev,
pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg);
pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Link Control Register =%x\n",
- pcipriv->ndis_adapter.linkctrl_reg));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
+ pcipriv->ndis_adapter.linkctrl_reg);
pci_read_config_byte(pdev, 0x98, &tmp);
tmp |= BIT(4);
@@ -551,11 +547,10 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
skb_pull(skb, EM_HDR_LEN);
RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
- ("new ring->idx:%d, "
- "free: skb_queue_len:%d, free: seq:%x\n",
- ring->idx,
- skb_queue_len(&ring->queue),
- *(u16 *) (skb->data + 22)));
+ "new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n",
+ ring->idx,
+ skb_queue_len(&ring->queue),
+ *(u16 *) (skb->data + 22));
if (prio == TXCMD_QUEUE) {
dev_kfree_skb(skb);
@@ -593,11 +588,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
== 2) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- ("more desc left, wake"
- "skb_queue@%d,ring->idx = %d,"
- "skb_queue_len = 0x%d\n",
- prio, ring->idx,
- skb_queue_len(&ring->queue)));
+ "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%d\n",
+ prio, ring->idx,
+ skb_queue_len(&ring->queue));
ieee80211_wake_queue(hw,
skb_get_queue_mapping
@@ -657,6 +650,8 @@ static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb,
return;
uskb = dev_alloc_skb(skb->len + 128);
+ if (!uskb)
+ return; /* exit if allocation failed */
memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, sizeof(rx_status));
pdata = (u8 *)skb_put(uskb, skb->len);
memcpy(pdata, skb->data, skb->len);
@@ -709,9 +704,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
if (unlikely(!new_skb)) {
- RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
- DBG_DMESG,
- ("can't alloc skb for rx\n"));
+ RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV), DBG_DMESG,
+ "can't alloc skb for rx\n");
goto done;
}
@@ -796,38 +790,37 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
/*<1> beacon related */
if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- ("beacon ok interrupt!\n"));
+ "beacon ok interrupt!\n");
}
if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- ("beacon err interrupt!\n"));
+ "beacon err interrupt!\n");
}
if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- ("beacon interrupt!\n"));
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
}
if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- ("prepare beacon for interrupt!\n"));
+ "prepare beacon for interrupt!\n");
tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
}
/*<3> Tx related */
if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("IMR_TXFOVW!\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- ("Manage ok interrupt!\n"));
+ "Manage ok interrupt!\n");
_rtl_pci_tx_isr(hw, MGNT_QUEUE);
}
if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- ("HIGH_QUEUE ok interrupt!\n"));
+ "HIGH_QUEUE ok interrupt!\n");
_rtl_pci_tx_isr(hw, HIGH_QUEUE);
}
@@ -835,7 +828,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- ("BK Tx OK interrupt!\n"));
+ "BK Tx OK interrupt!\n");
_rtl_pci_tx_isr(hw, BK_QUEUE);
}
@@ -843,7 +836,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- ("BE TX OK interrupt!\n"));
+ "BE TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, BE_QUEUE);
}
@@ -851,7 +844,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- ("VI TX OK interrupt!\n"));
+ "VI TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, VI_QUEUE);
}
@@ -859,7 +852,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- ("Vo TX OK interrupt!\n"));
+ "Vo TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, VO_QUEUE);
}
@@ -868,25 +861,25 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- ("CMD TX OK interrupt!\n"));
+ "CMD TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, TXCMD_QUEUE);
}
}
/*<2> Rx related */
if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, ("Rx ok interrupt!\n"));
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
_rtl_pci_rx_interrupt(hw);
}
if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("rx descriptor unavailable!\n"));
+ "rx descriptor unavailable!\n");
_rtl_pci_rx_interrupt(hw);
}
if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("rx overflow !\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
_rtl_pci_rx_interrupt(hw);
}
@@ -1028,7 +1021,7 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
if (!ring || (unsigned long)ring & 0xFF) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Cannot allocate TX ring (prio = %d)\n", prio));
+ "Cannot allocate TX ring (prio = %d)\n", prio);
return -ENOMEM;
}
@@ -1039,8 +1032,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
rtlpci->tx_ring[prio].entries = entries;
skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("queue:%d, ring_addr:%p\n", prio, ring));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n",
+ prio, ring);
for (i = 0; i < entries; i++) {
nextdescaddress = (u32) dma +
@@ -1078,7 +1071,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
if (!rtlpci->rx_ring[rx_queue_idx].desc ||
(unsigned long)rtlpci->rx_ring[rx_queue_idx].desc & 0xFF) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Cannot allocate RX ring\n"));
+ "Cannot allocate RX ring\n");
return -ENOMEM;
}
@@ -1155,10 +1148,12 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
ring->idx = (ring->idx + 1) % ring->entries;
}
- pci_free_consistent(rtlpci->pdev,
- sizeof(*ring->desc) * ring->entries,
- ring->desc, ring->dma);
- ring->desc = NULL;
+ if (ring->desc) {
+ pci_free_consistent(rtlpci->pdev,
+ sizeof(*ring->desc) * ring->entries,
+ ring->desc, ring->dma);
+ ring->desc = NULL;
+ }
}
static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci)
@@ -1182,12 +1177,14 @@ static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci)
kfree_skb(skb);
}
- pci_free_consistent(rtlpci->pdev,
+ if (rtlpci->rx_ring[rx_queue_idx].desc) {
+ pci_free_consistent(rtlpci->pdev,
sizeof(*rtlpci->rx_ring[rx_queue_idx].
desc) * rtlpci->rxringcount,
rtlpci->rx_ring[rx_queue_idx].desc,
rtlpci->rx_ring[rx_queue_idx].dma);
- rtlpci->rx_ring[rx_queue_idx].desc = NULL;
+ rtlpci->rx_ring[rx_queue_idx].desc = NULL;
+ }
}
}
@@ -1355,7 +1352,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
u8 temp_one = 1;
if (ieee80211_is_auth(fc)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
rtl_ips_nic_on(hw);
}
@@ -1388,10 +1385,9 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("No more TX desc@%d, ring->idx = %d,"
- "idx = %d, skb_queue_len = 0x%d\n",
- hw_queue, ring->idx, idx,
- skb_queue_len(&ring->queue)));
+ "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
+ hw_queue, ring->idx, idx,
+ skb_queue_len(&ring->queue));
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
return skb->len;
@@ -1426,11 +1422,9 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
hw_queue != BEACON_QUEUE) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- ("less desc left, stop skb_queue@%d, "
- "ring->idx = %d,"
- "idx = %d, skb_queue_len = 0x%d\n",
- hw_queue, ring->idx, idx,
- skb_queue_len(&ring->queue)));
+ "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
+ hw_queue, ring->idx, idx,
+ skb_queue_len(&ring->queue));
ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
}
@@ -1497,7 +1491,7 @@ static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
err = _rtl_pci_init_trx_ring(hw);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("tx ring initialization failed"));
+ "tx ring initialization failed\n");
return err;
}
@@ -1519,12 +1513,12 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
err = rtlpriv->cfg->ops->hw_init(hw);
if (err) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("Failed to config hardware!\n"));
+ "Failed to config hardware!\n");
return err;
}
rtlpriv->cfg->ops->enable_interrupt(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("enable_interrupt OK\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");
rtl_init_rx_config(hw);
@@ -1535,7 +1529,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
rtlpci->up_first_time = false;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("OK\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "OK\n");
return 0;
}
@@ -1573,6 +1567,9 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
rtlpci->driver_is_goingto_unload = true;
rtlpriv->cfg->ops->hw_disable(hw);
+ /* some things are not needed if firmware not available */
+ if (!rtlpriv->max_fw_size)
+ return;
rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
@@ -1622,20 +1619,20 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
switch (revisionid) {
case RTL_PCI_REVISION_ID_8192PCIE:
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("8192 PCI-E is found - "
- "vid/did=%x/%x\n", venderid, deviceid));
+ "8192 PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
break;
case RTL_PCI_REVISION_ID_8192SE:
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("8192SE is found - "
- "vid/did=%x/%x\n", venderid, deviceid));
+ "8192SE is found - vid/did=%x/%x\n",
+ venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("Err: Unknown device - "
- "vid/did=%x/%x\n", venderid, deviceid));
+ "Err: Unknown device - vid/did=%x/%x\n",
+ venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
break;
@@ -1646,18 +1643,18 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
deviceid == RTL_PCI_8188CE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("8192C PCI-E is found - "
- "vid/did=%x/%x\n", venderid, deviceid));
+ "8192C PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
} else if (deviceid == RTL_PCI_8192DE_DID ||
deviceid == RTL_PCI_8192DE_DID2) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("8192D PCI-E is found - "
- "vid/did=%x/%x\n", venderid, deviceid));
+ "8192D PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
} else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("Err: Unknown device -"
- " vid/did=%x/%x\n", venderid, deviceid));
+ "Err: Unknown device - vid/did=%x/%x\n",
+ venderid, deviceid);
rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
}
@@ -1665,19 +1662,18 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
if (revisionid == 0 || revisionid == 1) {
if (revisionid == 0) {
- RT_TRACE(rtlpriv, COMP_INIT,
- DBG_LOUD, ("Find 92DE MAC0.\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find 92DE MAC0\n");
rtlhal->interfaceindex = 0;
} else if (revisionid == 1) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("Find 92DE MAC1.\n"));
+ "Find 92DE MAC1\n");
rtlhal->interfaceindex = 1;
}
} else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("Unknown device - "
- "VendorID/DeviceID=%x/%x, Revision=%x\n",
- venderid, deviceid, revisionid));
+ "Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n",
+ venderid, deviceid, revisionid);
rtlhal->interfaceindex = 0;
}
}
@@ -1693,8 +1689,8 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
pcipriv->ndis_adapter.pcibridge_vendor = tmp;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("Pci Bridge Vendor is found index:"
- " %d\n", tmp));
+ "Pci Bridge Vendor is found index: %d\n",
+ tmp);
break;
}
}
@@ -1723,23 +1719,21 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("pcidev busnumber:devnumber:funcnumber:"
- "vendor:link_ctl %d:%d:%d:%x:%x\n",
- pcipriv->ndis_adapter.busnumber,
- pcipriv->ndis_adapter.devnumber,
- pcipriv->ndis_adapter.funcnumber,
- pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg));
+ "pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n",
+ pcipriv->ndis_adapter.busnumber,
+ pcipriv->ndis_adapter.devnumber,
+ pcipriv->ndis_adapter.funcnumber,
+ pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("pci_bridge busnumber:devnumber:funcnumber:vendor:"
- "pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
- pcipriv->ndis_adapter.pcibridge_busnum,
- pcipriv->ndis_adapter.pcibridge_devnum,
- pcipriv->ndis_adapter.pcibridge_funcnum,
- pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
- pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
- pcipriv->ndis_adapter.pcibridge_linkctrlreg,
- pcipriv->ndis_adapter.amd_l1_patch));
+ "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
+ pcipriv->ndis_adapter.pcibridge_busnum,
+ pcipriv->ndis_adapter.pcibridge_devnum,
+ pcipriv->ndis_adapter.pcibridge_funcnum,
+ pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
+ pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
+ pcipriv->ndis_adapter.pcibridge_linkctrlreg,
+ pcipriv->ndis_adapter.amd_l1_patch);
rtl_pci_parse_configuration(pdev, hw);
@@ -1759,18 +1753,17 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
- RT_ASSERT(false,
- ("%s : Cannot enable new PCI device\n",
- pci_name(pdev)));
+ RT_ASSERT(false, "%s : Cannot enable new PCI device\n",
+ pci_name(pdev));
return err;
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- RT_ASSERT(false, ("Unable to obtain 32bit DMA "
- "for consistent allocations\n"));
- pci_disable_device(pdev);
- return -ENOMEM;
+ RT_ASSERT(false,
+ "Unable to obtain 32bit DMA for consistent allocations\n");
+ err = -ENOMEM;
+ goto fail1;
}
}
@@ -1780,7 +1773,7 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
sizeof(struct rtl_priv), &rtl_ops);
if (!hw) {
RT_ASSERT(false,
- ("%s : ieee80211 alloc failed\n", pci_name(pdev)));
+ "%s : ieee80211 alloc failed\n", pci_name(pdev));
err = -ENOMEM;
goto fail1;
}
@@ -1791,6 +1784,7 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
rtlpriv = hw->priv;
pcipriv = (void *)rtlpriv->priv;
pcipriv->dev.pdev = pdev;
+ init_completion(&rtlpriv->firmware_loading_complete);
/* init cfg & intf_ops */
rtlpriv->rtlhal.interface = INTF_PCI;
@@ -1810,8 +1804,8 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
/* MEM map */
err = pci_request_regions(pdev, KBUILD_MODNAME);
if (err) {
- RT_ASSERT(false, ("Can't obtain PCI resources\n"));
- return err;
+ RT_ASSERT(false, "Can't obtain PCI resources\n");
+ goto fail1;
}
pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
@@ -1823,15 +1817,15 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
(unsigned long)pci_iomap(pdev,
rtlpriv->cfg->bar_id, pmem_len);
if (rtlpriv->io.pci_mem_start == 0) {
- RT_ASSERT(false, ("Can't map PCI mem\n"));
+ RT_ASSERT(false, "Can't map PCI mem\n");
+ err = -ENOMEM;
goto fail2;
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("mem mapped space: start: 0x%08lx len:%08lx "
- "flags:%08lx, after map:0x%08lx\n",
- pmem_start, pmem_len, pmem_flags,
- rtlpriv->io.pci_mem_start));
+ "mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n",
+ pmem_start, pmem_len, pmem_flags,
+ rtlpriv->io.pci_mem_start);
/* Disable Clk Request */
pci_write_config_byte(pdev, 0x81, 0);
@@ -1841,8 +1835,10 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
pci_write_config_byte(pdev, 0x04, 0x07);
/* find adapter */
- if (!_rtl_pci_find_adapter(pdev, hw))
+ if (!_rtl_pci_find_adapter(pdev, hw)) {
+ err = -ENODEV;
goto fail3;
+ }
/* Init IO handler */
_rtl_pci_io_handler_init(&pdev->dev, hw);
@@ -1851,8 +1847,8 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
rtlpriv->cfg->ops->read_eeprom_info(hw);
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Can't init_sw_vars.\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
+ err = -ENODEV;
goto fail3;
}
@@ -1865,69 +1861,55 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
err = rtl_init_core(hw);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Can't allocate sw for mac80211.\n"));
+ "Can't allocate sw for mac80211\n");
goto fail3;
}
/* Init PCI sw */
err = rtl_pci_init(hw, pdev);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Failed to init PCI.\n"));
- goto fail3;
- }
-
- err = ieee80211_register_hw(hw);
- if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Can't register mac80211 hw.\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to init PCI\n");
goto fail3;
- } else {
- rtlpriv->mac80211.mac80211_registered = 1;
}
err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("failed to create sysfs device attributes\n"));
+ "failed to create sysfs device attributes\n");
goto fail3;
}
- /*init rfkill */
- rtl_init_rfkill(hw);
-
rtlpci = rtl_pcidev(pcipriv);
err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
IRQF_SHARED, KBUILD_MODNAME, hw);
if (err) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("%s: failed to register IRQ handler\n",
- wiphy_name(hw->wiphy)));
+ "%s: failed to register IRQ handler\n",
+ wiphy_name(hw->wiphy));
goto fail3;
- } else {
- rtlpci->irq_alloc = 1;
}
+ rtlpci->irq_alloc = 1;
- set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
return 0;
fail3:
- pci_set_drvdata(pdev, NULL);
rtl_deinit_core(hw);
_rtl_pci_io_handler_release(hw);
- ieee80211_free_hw(hw);
if (rtlpriv->io.pci_mem_start != 0)
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
fail2:
pci_release_regions(pdev);
+ complete(&rtlpriv->firmware_loading_complete);
fail1:
-
+ if (hw)
+ ieee80211_free_hw(hw);
+ pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
- return -ENODEV;
+ return err;
}
EXPORT_SYMBOL(rtl_pci_probe);
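The probe error paths above are reshaped so that each failure releases only what has already been acquired and the function returns the real error code rather than a blanket -ENODEV. A small userspace sketch of that goto-unwind shape, using malloc/fopen in place of the PCI and mac80211 resources (names and resources here are illustrative only):

/* Acquire in order, unwind in reverse order on failure. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int demo_probe(const char *path)
{
	char *buf;
	FILE *f;
	int err;

	buf = malloc(4096);
	if (!buf)
		return -ENOMEM;			/* nothing to unwind yet */

	f = fopen(path, "rb");
	if (!f) {
		err = -errno;
		goto fail1;			/* free buf only */
	}

	if (fread(buf, 1, 4096, f) == 0 && ferror(f)) {
		err = -EIO;
		goto fail2;			/* close file, then free buf */
	}

	fclose(f);
	free(buf);
	return 0;

fail2:
	fclose(f);
fail1:
	free(buf);
	return err;				/* propagate the real error */
}

int main(void)
{
	return demo_probe("/etc/hostname") ? EXIT_FAILURE : EXIT_SUCCESS;
}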
@@ -1940,6 +1922,8 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
+ /* just in case driver is removed before firmware callback */
+ wait_for_completion(&rtlpriv->firmware_loading_complete);
clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);
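Disconnect now waits on firmware_loading_complete, which probe initializes and which the asynchronous firmware callback (rtl_fw_cb(), declared in core.h above) is presumably expected to complete, so teardown cannot race the firmware loader. A userspace sketch of that ordering, modelling struct completion with a POSIX semaphore purely for illustration:

/* Teardown blocks until the async "firmware" callback has signalled. */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t fw_loading_complete;

static void *firmware_callback(void *arg)
{
	/* stands in for the firmware callback: parse/copy the image ... */
	usleep(100 * 1000);
	puts("firmware callback done");
	sem_post(&fw_loading_complete);		/* complete() */
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&fw_loading_complete, 0, 0);	/* init_completion() in probe */
	pthread_create(&t, NULL, firmware_callback, NULL);

	/* disconnect path: do not free driver state under the callback */
	sem_wait(&fw_loading_complete);		/* wait_for_completion() */
	puts("safe to tear down");

	pthread_join(t, NULL);
	return 0;
}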
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
index ebe0b42c0518..241448fc9ed5 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -239,7 +239,6 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
void rtl_pci_disconnect(struct pci_dev *pdev);
int rtl_pci_suspend(struct device *dev);
int rtl_pci_resume(struct device *dev);
-
static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
{
return readb((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 130fdd99d573..5b9c3b5e8c92 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -44,10 +44,11 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
if (is_hal_stop(rtlhal))
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("Driver is already down!\n"));
+ "Driver is already down!\n");
/*<2> Enable Adapter */
- rtlpriv->cfg->ops->hw_init(hw);
+ if (rtlpriv->cfg->ops->hw_init(hw))
+ return 1;
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
/*<3> Enable Interrupt */
@@ -104,8 +105,7 @@ bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
case ERFOFF:
- if ((changesource == RF_CHANGE_BY_HW)
- && (ppsc->hwradiooff == false)) {
+ if ((changesource == RF_CHANGE_BY_HW) && !ppsc->hwradiooff) {
ppsc->hwradiooff = true;
}
@@ -120,7 +120,7 @@ bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
@@ -176,7 +176,7 @@ void rtl_ips_nic_off_wq_callback(void *data)
if (mac->opmode != NL80211_IFTYPE_STATION) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("not station return\n"));
+ "not station return\n");
return;
}
@@ -207,7 +207,7 @@ void rtl_ips_nic_off_wq_callback(void *data)
(mac->link_state == MAC80211_NOLINK) &&
!mac->act_scanning) {
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- ("IPSEnter(): Turn off RF.\n"));
+ "IPSEnter(): Turn off RF\n");
ppsc->inactive_pwrstate = ERFOFF;
ppsc->in_powersavemode = true;
@@ -280,8 +280,7 @@ static bool rtl_get_fwlps_doze(struct ieee80211_hw *hw)
if (ps_timediff < 2000) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("Delay enter Fw LPS for DHCP, ARP,"
- " or EAPOL exchanging state.\n"));
+ "Delay enter Fw LPS for DHCP, ARP, or EAPOL exchanging state\n");
return false;
}
@@ -328,8 +327,8 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
bool fw_current_inps;
if (ppsc->dot11_psmode == EACTIVE) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("FW LPS leave ps_mode:%x\n",
- FW_PS_ACTIVE_MODE));
+ "FW LPS leave ps_mode:%x\n",
+ FW_PS_ACTIVE_MODE);
rpwm_val = 0x0C; /* RF on */
fw_pwrmode = FW_PS_ACTIVE_MODE;
@@ -347,8 +346,8 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
} else {
if (rtl_get_fwlps_doze(hw)) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("FW LPS enter ps_mode:%x\n",
- ppsc->fwctrl_psmode));
+ "FW LPS enter ps_mode:%x\n",
+ ppsc->fwctrl_psmode);
rpwm_val = 0x02; /* RF off */
fw_current_inps = true;
@@ -402,7 +401,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
if (mac->cnt_after_linked >= 2) {
if (ppsc->dot11_psmode == EACTIVE) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("Enter 802.11 power save mode...\n"));
+ "Enter 802.11 power save mode...\n");
rtl_lps_set_psmode(hw, EAUTOPS);
}
@@ -434,7 +433,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
}
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("Busy Traffic,Leave 802.11 power save..\n"));
+ "Busy Traffic,Leave 802.11 power save..\n");
rtl_lps_set_psmode(hw, EACTIVE);
}
@@ -518,8 +517,8 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
queue_delayed_work(rtlpriv->works.rtl_wq,
&rtlpriv->works.ps_work, MSECS(5));
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, ("u_bufferd: %x, "
- "m_buffered: %x\n", u_buffed, m_buffed));
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
+ "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed);
}
}
@@ -607,8 +606,8 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
* sleep = dtim_period, that meaons, we should
* awake before every dtim */
RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- ("dtim_counter:%x will sleep :%d"
- " beacon_intv\n", rtlpriv->psc.dtim_counter, sleep_intv));
+ "dtim_counter:%x will sleep :%d beacon_intv\n",
+ rtlpriv->psc.dtim_counter, sleep_intv);
/* we tested that 40ms is enough for sw & hw sw delay */
queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.ps_rfon_wq,
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
index 84628e6041c7..1357856998c2 100644
--- a/drivers/net/wireless/rtlwifi/ps.h
+++ b/drivers/net/wireless/rtlwifi/ps.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index 539df66dce0a..c66f08a0524a 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -251,7 +251,7 @@ static void *rtl_rate_alloc_sta(void *ppriv,
rate_priv = kzalloc(sizeof(struct rtl_rate_priv), gfp);
if (!rate_priv) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Unable to allocate private rc structure\n"));
+ "Unable to allocate private rc structure\n");
return NULL;
}
diff --git a/drivers/net/wireless/rtlwifi/rc.h b/drivers/net/wireless/rtlwifi/rc.h
index 4afa2c20adcf..4d6176160610 100644
--- a/drivers/net/wireless/rtlwifi/rc.h
+++ b/drivers/net/wireless/rtlwifi/rc.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c
index 9fedb1f70919..c1608cddc529 100644
--- a/drivers/net/wireless/rtlwifi/regd.c
+++ b/drivers/net/wireless/rtlwifi/regd.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -398,13 +398,11 @@ int rtl_regd_init(struct ieee80211_hw *hw,
rtlpriv->regd.country_code = rtlpriv->efuse.channel_plan;
RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
- (KERN_DEBUG "rtl: EEPROM regdomain: 0x%0x\n",
- rtlpriv->regd.country_code));
+ "rtl: EEPROM regdomain: 0x%0x\n", rtlpriv->regd.country_code);
if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
- (KERN_DEBUG "rtl: EEPROM indicates invalid contry code"
- "world wide 13 should be used\n"));
+ "rtl: EEPROM indicates invalid contry code, world wide 13 should be used\n");
rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
}
@@ -420,8 +418,8 @@ int rtl_regd_init(struct ieee80211_hw *hw,
}
RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
- (KERN_DEBUG "rtl: Country alpha2 being used: %c%c\n",
- rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]));
+ "rtl: Country alpha2 being used: %c%c\n",
+ rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]);
_rtl_regd_init_wiphy(&rtlpriv->regd, wiphy, reg_notifier);
@@ -433,7 +431,7 @@ int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_REGD, DBG_LOUD, ("\n"));
+ RT_TRACE(rtlpriv, COMP_REGD, DBG_LOUD, "\n");
return _rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd);
}
diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/rtlwifi/regd.h
index d23118938fac..70ef2f418a44 100644
--- a/drivers/net/wireless/rtlwifi/regd.h
+++ b/drivers/net/wireless/rtlwifi/regd.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index 72a98cab6f69..1208b753f62f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -246,16 +246,15 @@ static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- ("cnt_parity_fail = %d, cnt_rate_illegal = %d, "
- "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
- falsealm_cnt->cnt_parity_fail,
- falsealm_cnt->cnt_rate_illegal,
- falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail));
+ "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- ("cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
- falsealm_cnt->cnt_ofdm_fail,
- falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all));
+ "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
}
static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
@@ -313,8 +312,8 @@ static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
dm_digtable.backoff_val;
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- ("rssi_val_min = %x backoff_val %x\n",
- dm_digtable.rssi_val_min, dm_digtable.backoff_val));
+ "rssi_val_min = %x backoff_val %x\n",
+ dm_digtable.rssi_val_min, dm_digtable.backoff_val);
rtl92c_dm_write_dig(hw);
}
@@ -330,8 +329,8 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
if (mac->opmode == NL80211_IFTYPE_ADHOC)
multi_sta = true;
- if ((multi_sta == false) || (dm_digtable.cursta_connectctate !=
- DIG_STA_DISCONNECT)) {
+ if (!multi_sta ||
+ dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
initialized = false;
dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
return;
@@ -364,10 +363,9 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- ("curmultista_connectstate = "
- "%x dig_ext_port_stage %x\n",
- dm_digtable.curmultista_connectstate,
- dm_digtable.dig_ext_port_stage));
+ "curmultista_connectstate = %x dig_ext_port_stage %x\n",
+ dm_digtable.curmultista_connectstate,
+ dm_digtable.dig_ext_port_stage);
}
static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
@@ -375,10 +373,9 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- ("presta_connectstate = %x,"
- " cursta_connectctate = %x\n",
- dm_digtable.presta_connectstate,
- dm_digtable.cursta_connectctate));
+ "presta_connectstate = %x, cursta_connectctate = %x\n",
+ dm_digtable.presta_connectstate,
+ dm_digtable.cursta_connectctate);
if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate
|| dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT
@@ -464,11 +461,11 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state;
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- ("CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state));
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "CCKPDStage=%x\n",
+ dm_digtable.cur_cck_pd_state);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- ("is92C=%x\n", IS_92C_SERIAL(rtlhal->version)));
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "is92C=%x\n",
+ IS_92C_SERIAL(rtlhal->version));
}
static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
@@ -519,10 +516,13 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- ("cur_igvalue = 0x%x, "
- "pre_igvalue = 0x%x, backoff_val = %d\n",
- dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
- dm_digtable.backoff_val));
+ "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
+ dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
+ dm_digtable.backoff_val);
+
+ dm_digtable.cur_igvalue += 2;
+ if (dm_digtable.cur_igvalue > 0x3f)
+ dm_digtable.cur_igvalue = 0x3f;
if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
@@ -676,15 +676,14 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
rtlpriv->dm.txpower_trackinginit = true;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n"));
+ "rtl92c_dm_txpower_tracking_callback_thermalmeter\n");
thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
- "eeprom_thermalmeter 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter));
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter);
rtl92c_phy_ap_calibrate(hw, (thermalvalue -
rtlefuse->eeprom_thermalmeter));
@@ -702,10 +701,9 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
ofdm_index_old[0] = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("Initial pathA ele_d reg0x%x = 0x%lx, "
- "ofdm_index=0x%x\n",
+ "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
ROFDM0_XATXIQIMBALANCE,
- ele_d, ofdm_index_old[0]));
+ ele_d, ofdm_index_old[0]);
break;
}
}
@@ -719,11 +717,10 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
MASKOFDM_D)) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- ("Initial pathB ele_d reg0x%x = "
- "0x%lx, ofdm_index=0x%x\n",
- ROFDM0_XBTXIQIMBALANCE, ele_d,
- ofdm_index_old[1]));
+ DBG_LOUD,
+ "Initial pathB ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
+ ROFDM0_XBTXIQIMBALANCE, ele_d,
+ ofdm_index_old[1]);
break;
}
}
@@ -741,11 +738,10 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
DBG_LOUD,
- ("Initial reg0x%x = 0x%lx, "
- "cck_index=0x%x, ch 14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- cck_index_old,
- rtlpriv->dm.cck_inch14));
+ "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch 14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ cck_index_old,
+ rtlpriv->dm.cck_inch14);
break;
}
} else {
@@ -757,11 +753,10 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
DBG_LOUD,
- ("Initial reg0x%x = 0x%lx, "
- "cck_index=0x%x, ch14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- cck_index_old,
- rtlpriv->dm.cck_inch14));
+ "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ cck_index_old,
+ rtlpriv->dm.cck_inch14);
break;
}
}
@@ -790,12 +785,10 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
(rtlpriv->dm.thermalvalue_iqk - thermalvalue);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
- "eeprom_thermalmeter 0x%x delta 0x%x "
- "delta_lck 0x%x delta_iqk 0x%x\n",
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
thermalvalue, rtlpriv->dm.thermalvalue,
rtlefuse->eeprom_thermalmeter, delta, delta_lck,
- delta_iqk));
+ delta_iqk);
if (delta_lck > 1) {
rtlpriv->dm.thermalvalue_lck = thermalvalue;
@@ -815,18 +808,15 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
if (is2t) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("temp OFDM_A_index=0x%x, "
- "OFDM_B_index=0x%x,"
- "cck_index=0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.ofdm_index[1],
- rtlpriv->dm.cck_index));
+ "temp OFDM_A_index=0x%x, OFDM_B_index=0x%x, cck_index=0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.ofdm_index[1],
+ rtlpriv->dm.cck_index);
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("temp OFDM_A_index=0x%x,"
- "cck_index=0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.cck_index));
+ "temp OFDM_A_index=0x%x, cck_index=0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.cck_index);
}
if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
@@ -918,16 +908,13 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
if (is2t) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("new OFDM_A_index=0x%x, "
- "OFDM_B_index=0x%x,"
- "cck_index=0x%x\n",
- ofdm_index[0], ofdm_index[1],
- cck_index));
+ "new OFDM_A_index=0x%x, OFDM_B_index=0x%x, cck_index=0x%x\n",
+ ofdm_index[0], ofdm_index[1],
+ cck_index);
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("new OFDM_A_index=0x%x,"
- "cck_index=0x%x\n",
- ofdm_index[0], cck_index));
+ "new OFDM_A_index=0x%x, cck_index=0x%x\n",
+ ofdm_index[0], cck_index);
}
}
@@ -1085,7 +1072,7 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
rtlpriv->dm.thermalvalue = thermalvalue;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n"));
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
}
@@ -1098,8 +1085,8 @@ static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
rtlpriv->dm.txpower_trackinginit = false;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("pMgntInfo->txpower_tracking = %d\n",
- rtlpriv->dm.txpower_tracking));
+ "pMgntInfo->txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking);
}
static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
@@ -1125,12 +1112,12 @@ static void rtl92c_dm_check_txpower_tracking_thermal_meter(
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
0x60);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("Trigger 92S Thermal Meter!!\n"));
+ "Trigger 92S Thermal Meter!!\n");
tm_trigger = 1;
return;
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("Schedule TxPowerTracking direct call!!\n"));
+ "Schedule TxPowerTracking direct call!!\n");
rtl92c_dm_txpower_tracking_directcall(hw);
tm_trigger = 0;
}
@@ -1169,13 +1156,13 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
if (is_hal_stop(rtlhal)) {
RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- ("<---- driver is going to unload\n"));
+ "<---- driver is going to unload\n");
return;
}
if (!rtlpriv->dm.useramask) {
RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- ("<---- driver does not control rate adaptive mask\n"));
+ "<---- driver does not control rate adaptive mask\n");
return;
}
@@ -1210,22 +1197,26 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
p_ra->ratr_state = DM_RATR_STA_LOW;
if (p_ra->pre_ratr_state != p_ra->ratr_state) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "RSSI = %ld\n",
+ rtlpriv->dm.undecorated_smoothed_pwdb);
RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- ("RSSI = %ld\n",
- rtlpriv->dm.undecorated_smoothed_pwdb));
+ "RSSI_LEVEL = %d\n", p_ra->ratr_state);
RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- ("RSSI_LEVEL = %d\n", p_ra->ratr_state));
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- ("PreState = %d, CurState = %d\n",
- p_ra->pre_ratr_state, p_ra->ratr_state));
-
- rcu_read_lock();
- sta = ieee80211_find_sta(mac->vif, mac->bssid);
+ "PreState = %d, CurState = %d\n",
+ p_ra->pre_ratr_state, p_ra->ratr_state);
+
+ /* Only the PCI card uses sta in the update rate table
+ * callback routine */
+ if (rtlhal->interface == INTF_PCI) {
+ rcu_read_lock();
+ sta = ieee80211_find_sta(mac->vif, mac->bssid);
+ }
rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
p_ra->ratr_state);
p_ra->pre_ratr_state = p_ra->ratr_state;
- rcu_read_unlock();
+ if (rtlhal->interface == INTF_PCI)
+ rcu_read_unlock();
}
}
}
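The hunk above makes the rcu_read_lock()/rcu_read_unlock() pair conditional: ieee80211_find_sta() returns an RCU-protected pointer that is only valid while the RCU read lock is held, but only the PCI variant actually passes that station pointer into update_rate_tbl(), so the USB path now skips the lookup and leaves sta NULL. Restated as a small sketch with the locking rule spelled out (names taken from the hunk):

struct ieee80211_sta *sta = NULL;

/* ieee80211_find_sta() must be called, and its result used, under
 * rcu_read_lock(); the lock therefore has to span the
 * update_rate_tbl() call that consumes 'sta' on PCI hardware. */
if (rtlhal->interface == INTF_PCI) {
	rcu_read_lock();
	sta = ieee80211_find_sta(mac->vif, mac->bssid);
}

rtlpriv->cfg->ops->update_rate_tbl(hw, sta, p_ra->ratr_state);
p_ra->pre_ratr_state = p_ra->ratr_state;

if (rtlhal->interface == INTF_PCI)
	rcu_read_unlock();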
@@ -1316,8 +1307,7 @@ static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
if (((mac->link_state == MAC80211_NOLINK)) &&
(rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
dm_pstable.rssi_val_min = 0;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- ("Not connected to any\n"));
+ RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "Not connected to any\n");
}
if (mac->link_state == MAC80211_LINKED) {
@@ -1325,22 +1315,22 @@ static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
dm_pstable.rssi_val_min =
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- ("AP Client PWDB = 0x%lx\n",
- dm_pstable.rssi_val_min));
+ "AP Client PWDB = 0x%lx\n",
+ dm_pstable.rssi_val_min);
} else {
dm_pstable.rssi_val_min =
rtlpriv->dm.undecorated_smoothed_pwdb;
RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- ("STA Default Port PWDB = 0x%lx\n",
- dm_pstable.rssi_val_min));
+ "STA Default Port PWDB = 0x%lx\n",
+ dm_pstable.rssi_val_min);
}
} else {
dm_pstable.rssi_val_min =
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- ("AP Ext Port PWDB = 0x%lx\n",
- dm_pstable.rssi_val_min));
+ "AP Ext Port PWDB = 0x%lx\n",
+ dm_pstable.rssi_val_min);
}
if (IS_92C_SERIAL(rtlhal->version))
@@ -1381,7 +1371,7 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- ("Not connected to any\n"));
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
@@ -1394,28 +1384,28 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
undecorated_smoothed_pwdb =
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("AP Client PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb));
+ "AP Client PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb);
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.undecorated_smoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("STA Default Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb));
+ "STA Default Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb);
}
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("AP Ext Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb));
+ "AP Ext Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb);
}
if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"));
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else if ((undecorated_smoothed_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
(undecorated_smoothed_pwdb >=
@@ -1423,18 +1413,18 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"));
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undecorated_smoothed_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("TXHIGHPWRLEVEL_NORMAL\n"));
+ "TXHIGHPWRLEVEL_NORMAL\n");
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("PHY_SetTxPowerLevel8192S() Channel = %d\n",
- rtlphy->current_channel));
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtlphy->current_channel);
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
index b9736d3e9a39..2178e3761883 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 931d97979b04..c20b3c30f62e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,16 +27,13 @@
*
*****************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/firmware.h>
-#include <linux/export.h>
#include "../wifi.h"
#include "../pci.h"
#include "../base.h"
#include "../rtl8192ce/reg.h"
#include "../rtl8192ce/def.h"
#include "fw_common.h"
+#include <linux/export.h>
static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
{
@@ -172,7 +169,7 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 *bufferPtr = (u8 *) buffer;
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("FW size is %d bytes,\n", size));
+ RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes\n", size);
if (IS_CHIP_VER_B(version)) {
u32 pageNums, remainSize;
@@ -186,7 +183,7 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
if (pageNums > 4) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Page numbers should not greater then 4\n"));
+ "Page numbers should not greater then 4\n");
}
for (page = 0; page < pageNums; page++) {
@@ -219,13 +216,12 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw)
if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("chksum report faill ! REG_MCUFWDL:0x%08x .\n",
- value32));
+ "chksum report faill ! REG_MCUFWDL:0x%08x\n", value32);
return -EIO;
}
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- ("Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32));
+ "Checksum report OK ! REG_MCUFWDL:0x%08x\n", value32);
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
value32 |= MCUFWDL_RDY;
@@ -238,9 +234,8 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw)
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
if (value32 & WINTINI_RDY) {
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- ("Polling FW ready success!!"
- " REG_MCUFWDL:0x%08x .\n",
- value32));
+ "Polling FW ready success!! REG_MCUFWDL:0x%08x\n",
+ value32);
return 0;
}
@@ -249,7 +244,7 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw)
} while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32));
+ "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n", value32);
return -EIO;
}
@@ -262,20 +257,19 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
u32 fwsize;
enum version_8192c version = rtlhal->version;
- if (!rtlhal->pfirmware)
+ if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware)
return 1;
- pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
pfwdata = (u8 *) rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
if (IS_FW_HEADER_EXIST(pfwheader)) {
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- ("Firmware Version(%d), Signature(%#x),Size(%d)\n",
+ "Firmware Version(%d), Signature(%#x),Size(%d)\n",
le16_to_cpu(pfwheader->version),
le16_to_cpu(pfwheader->signature),
- (uint)sizeof(struct rtl92c_firmware_header)));
+ (uint)sizeof(struct rtl92c_firmware_header));
pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
@@ -287,10 +281,10 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
if (_rtl92c_fw_free_to_go(hw)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Firmware is not ready to run!\n"));
+ "Firmware is not ready to run!\n");
} else {
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- ("Firmware is ready to run!\n"));
+ "Firmware is ready to run!\n");
}
return 0;
@@ -328,22 +322,22 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
unsigned long flag;
u8 idx;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("come in\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
if (rtlhal->h2c_setinprogress) {
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("H2C set in progress! Wait to set.."
- "element_id(%d).\n", element_id));
+ "H2C set in progress! Wait to set..element_id(%d)\n",
+ element_id);
while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("Wait 100 us (%d times)...\n",
- h2c_waitcounter));
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
udelay(100);
if (h2c_waitcounter > 1000)
@@ -363,8 +357,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
wait_writeh2c_limmit--;
if (wait_writeh2c_limmit == 0) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Write H2C fail because no trigger "
- "for FW INT!\n"));
+ "Write H2C fail because no trigger for FW INT!\n");
break;
}
@@ -388,7 +381,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
@@ -398,8 +391,8 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("Wating too long for FW read "
- "clear HMEBox(%d)!\n", boxnum));
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
+ boxnum);
break;
}
@@ -408,14 +401,14 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
isfw_read = _rtl92c_check_fw_read_last_h2c(hw, boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("Wating for FW read clear HMEBox(%d)!!! "
- "0x1BF = %2x\n", boxnum, u1b_tmp));
+ "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
+ boxnum, u1b_tmp);
}
if (!isfw_read) {
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("Write H2C register BOX[%d] fail!!!!! "
- "Fw do not read.\n", boxnum));
+ "Write H2C register BOX[%d] fail!!!!! Fw do not read\n",
+ boxnum);
break;
}
@@ -423,8 +416,8 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
memset(boxextcontent, 0, sizeof(boxextcontent));
boxcontent[0] = element_id;
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id));
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
switch (cmd_len) {
case 1:
@@ -493,7 +486,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
@@ -504,29 +497,22 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
rtlhal->last_hmeboxnum = 0;
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum));
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("go out\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
{
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u32 tmp_cmdbuf[2];
- if (rtlhal->fw_ready == false) {
- RT_ASSERT(false, ("return H2C cmd because of Fw "
- "download fail!!!\n"));
- return;
- }
-
memset(tmp_cmdbuf, 0, 8);
memcpy(tmp_cmdbuf, p_cmdbuffer, cmd_len);
_rtl92c_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf);
@@ -547,7 +533,7 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw)
while (u1b_tmp & BIT(2)) {
delay--;
if (delay == 0) {
- RT_ASSERT(false, ("8051 reset fail.\n"));
+ RT_ASSERT(false, "8051 reset fail\n");
break;
}
udelay(50);
@@ -562,7 +548,7 @@ void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
u8 u1_h2c_set_pwrmode[3] = {0};
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("FW LPS mode = %d\n", mode));
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1);
@@ -570,7 +556,7 @@ void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
ppsc->reg_max_lps_awakeintvl);
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
- "rtl92c_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n",
+ "rtl92c_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode",
u1_h2c_set_pwrmode, 3);
rtl92c_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
@@ -780,14 +766,16 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
totalpacketlen = TOTAL_RESERVED_PKT_LEN;
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
- "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+ "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL",
&reserved_page_packet[0], totalpacketlen);
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
- "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+ "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL",
u1RsvdPageLoc, 3);
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
memcpy((u8 *) skb_put(skb, totalpacketlen),
&reserved_page_packet, totalpacketlen);
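dev_alloc_skb() returns NULL when the allocation fails, and the following skb_put()/memcpy() would dereference that NULL, so the two added lines simply bail out on failure. The general pattern, as a short standalone sketch:

struct sk_buff *skb;

skb = dev_alloc_skb(totalpacketlen);
if (!skb)
	return;		/* allocation failed: give up rather than oops */

/* skb_put() grows the data area and returns a pointer to the new tail;
 * it is only safe to call once skb is known to be non-NULL. */
memcpy(skb_put(skb, totalpacketlen),
       &reserved_page_packet, totalpacketlen);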
@@ -798,15 +786,14 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
if (dlok) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("Set RSVD page location to Fw.\n"));
+ "Set RSVD page location to Fw\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
- "H2C_RSVDPAGE:\n",
- u1RsvdPageLoc, 3);
+ "H2C_RSVDPAGE", u1RsvdPageLoc, 3);
rtl92c_fill_h2c_cmd(hw, H2C_RSVDPAGE,
sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
} else
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("Set RSVD page location to Fw FAIL!!!!!!.\n"));
+ "Set RSVD page location to Fw FAIL!!!!!!\n");
}
EXPORT_SYMBOL(rtl92c_set_fw_rsvdpagepkt);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index cec5a3a1cc53..780ea5b1e24c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/rtlwifi/rtl8192c/main.c
index 605ff191aeb7..918b1d129e77 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/main.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/main.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,8 +27,8 @@
*
*****************************************************************************/
-#include <linux/module.h>
#include "../wifi.h"
+#include <linux/module.h>
MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 1f07558debf2..1eec3a06d1f3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -42,16 +42,15 @@ u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 returnvalue, originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
- "bitmask(%#x)\n", regaddr,
- bitmask));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
+ regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
- "Addr[0x%x]=0x%x\n", bitmask,
- regaddr, originalvalue));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+ bitmask, regaddr, originalvalue);
return returnvalue;
@@ -64,9 +63,9 @@ void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
- " data(%#x)\n", regaddr, bitmask,
- data));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
@@ -76,9 +75,9 @@ void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
- " data(%#x)\n", regaddr, bitmask,
- data));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
EXPORT_SYMBOL(rtl92c_phy_set_bb_reg);
@@ -86,7 +85,7 @@ EXPORT_SYMBOL(rtl92c_phy_set_bb_reg);
u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset)
{
- RT_ASSERT(false, ("deprecated!\n"));
+ RT_ASSERT(false, "deprecated!\n");
return 0;
}
@@ -96,7 +95,7 @@ void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset,
u32 data)
{
- RT_ASSERT(false, ("deprecated!\n"));
+ RT_ASSERT(false, "deprecated!\n");
}
EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_write);
@@ -114,7 +113,7 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
offset &= 0x3f;
newoffset = offset;
if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("return all one\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
return 0xFFFFFFFF;
}
tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
@@ -144,9 +143,8 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
else
retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
BLSSIREADBACKDATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rflssi_readback,
- retvalue));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rflssi_readback, retvalue);
return retvalue;
}
EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read);
@@ -162,16 +160,15 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("stop\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
return;
}
offset &= 0x3f;
newoffset = offset;
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset,
- data_and_addr));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset, data_and_addr);
}
EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write);
@@ -180,7 +177,7 @@ u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
u32 i;
for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
+ if ((bitmask >> i) & 0x1)
break;
}
return i;
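The loop above simply locates the lowest set bit of the mask so a masked register field can be shifted down to bit 0; the edit only drops the redundant '== 1'. For reference, an equivalent standalone helper (plain C, not a proposed change; callers are expected to pass a non-zero mask, as in the driver):

/* Position of the least-significant set bit; yields 32 for mask == 0,
 * matching the original loop running off the end. */
static unsigned int bit_shift_of(u32 bitmask)
{
	unsigned int i;

	for (i = 0; i < 32; i++)
		if (bitmask & (1u << i))
			break;
	return i;
}

/* Typical use: field = (regval & bitmask) >> bit_shift_of(bitmask); */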
@@ -216,30 +213,30 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
bool rtstatus;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "==>\n");
rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
- if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
return false;
}
if (rtlphy->rf_type == RF_1T2R) {
_rtl92c_phy_bb_config_1t(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n");
}
if (rtlefuse->autoload_failflag == false) {
rtlphy->pwrgroup_cnt = 0;
rtstatus = rtlpriv->cfg->ops->config_bb_with_pgheaderfile(hw,
BASEBAND_CONFIG_PHY_REG);
}
- if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
return false;
}
rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
BASEBAND_CONFIG_AGC_TAB);
- if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
return false;
}
rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
@@ -256,121 +253,51 @@ void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ int index;
+
+ if (regaddr == RTXAGC_A_RATE18_06)
+ index = 0;
+ else if (regaddr == RTXAGC_A_RATE54_24)
+ index = 1;
+ else if (regaddr == RTXAGC_A_CCK1_MCS32)
+ index = 6;
+ else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00)
+ index = 7;
+ else if (regaddr == RTXAGC_A_MCS03_MCS00)
+ index = 2;
+ else if (regaddr == RTXAGC_A_MCS07_MCS04)
+ index = 3;
+ else if (regaddr == RTXAGC_A_MCS11_MCS08)
+ index = 4;
+ else if (regaddr == RTXAGC_A_MCS15_MCS12)
+ index = 5;
+ else if (regaddr == RTXAGC_B_RATE18_06)
+ index = 8;
+ else if (regaddr == RTXAGC_B_RATE54_24)
+ index = 9;
+ else if (regaddr == RTXAGC_B_CCK1_55_MCS32)
+ index = 14;
+ else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff)
+ index = 15;
+ else if (regaddr == RTXAGC_B_MCS03_MCS00)
+ index = 10;
+ else if (regaddr == RTXAGC_B_MCS07_MCS04)
+ index = 11;
+ else if (regaddr == RTXAGC_B_MCS11_MCS08)
+ index = 12;
+ else if (regaddr == RTXAGC_B_MCS15_MCS12)
+ index = 13;
+ else
+ return;
- if (regaddr == RTXAGC_A_RATE18_06) {
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][0] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][0]));
- }
- if (regaddr == RTXAGC_A_RATE54_24) {
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][1] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][1]));
- }
- if (regaddr == RTXAGC_A_CCK1_MCS32) {
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][6] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][6]));
- }
- if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][7] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][7]));
- }
- if (regaddr == RTXAGC_A_MCS03_MCS00) {
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][2] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][2]));
- }
- if (regaddr == RTXAGC_A_MCS07_MCS04) {
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][3] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][3]));
- }
- if (regaddr == RTXAGC_A_MCS11_MCS08) {
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][4] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][4]));
- }
- if (regaddr == RTXAGC_A_MCS15_MCS12) {
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][5] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][5]));
- }
- if (regaddr == RTXAGC_B_RATE18_06) {
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][8] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][8]));
- }
- if (regaddr == RTXAGC_B_RATE54_24) {
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9]));
- }
- if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14]));
- }
- if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15]));
- }
- if (regaddr == RTXAGC_B_MCS03_MCS00) {
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10]));
- }
- if (regaddr == RTXAGC_B_MCS07_MCS04) {
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11]));
- }
- if (regaddr == RTXAGC_B_MCS11_MCS08) {
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12]));
- }
- if (regaddr == RTXAGC_B_MCS15_MCS12) {
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13]));
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][index] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n",
+ rtlphy->pwrgroup_cnt, index,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][index]);
+ if (index == 13)
rtlphy->pwrgroup_cnt++;
- }
}
EXPORT_SYMBOL(_rtl92c_store_pwrIndex_diffrate_offset);
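The refactor above collapses sixteen copy-pasted if blocks into one register-address-to-index mapping followed by a single store and a single trace; pwrgroup_cnt still only advances once the last column of a group (index 13) has been written. The same mapping could equally be expressed as a lookup table; the sketch below is purely illustrative (the struct and helper names are made up, while the regaddr/bitmask/index triples are the ones from the hunk):

struct pwr_reg_map {
	u32 regaddr;
	u32 bitmask;	/* 0 means "any bitmask" */
	int index;
};

static const struct pwr_reg_map pwr_map[] = {
	{ RTXAGC_A_RATE18_06,       0,          0 },
	{ RTXAGC_A_RATE54_24,       0,          1 },
	{ RTXAGC_A_MCS03_MCS00,     0,          2 },
	{ RTXAGC_A_MCS07_MCS04,     0,          3 },
	{ RTXAGC_A_MCS11_MCS08,     0,          4 },
	{ RTXAGC_A_MCS15_MCS12,     0,          5 },
	{ RTXAGC_A_CCK1_MCS32,      0,          6 },
	{ RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, 7 },
	{ RTXAGC_B_RATE18_06,       0,          8 },
	{ RTXAGC_B_RATE54_24,       0,          9 },
	{ RTXAGC_B_MCS03_MCS00,     0,          10 },
	{ RTXAGC_B_MCS07_MCS04,     0,          11 },
	{ RTXAGC_B_MCS11_MCS08,     0,          12 },
	{ RTXAGC_B_MCS15_MCS12,     0,          13 },
	{ RTXAGC_B_CCK1_55_MCS32,   0,          14 },
	{ RTXAGC_B_CCK11_A_CCK2_11, 0x000000ff, 15 },
};

static int pwr_index_of(u32 regaddr, u32 bitmask)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pwr_map); i++)
		if (pwr_map[i].regaddr == regaddr &&
		    (!pwr_map[i].bitmask || pwr_map[i].bitmask == bitmask))
			return pwr_map[i].index;
	return -1;	/* not one of the tracked TXAGC registers */
}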
@@ -389,12 +316,11 @@ void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
(u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Default initial gain (c50=0x%x, "
- "c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]));
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8) rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR3, MASKBYTE0);
@@ -402,8 +328,8 @@ void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
ROFDM0_RXDETECTOR2, MASKDWORD);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync));
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
@@ -584,7 +510,7 @@ void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
u8 cckpowerlevel[2], ofdmpowerlevel[2];
- if (rtlefuse->txpwr_fromeprom == false)
+ if (!rtlefuse->txpwr_fromeprom)
return;
_rtl92c_get_txpower_index(hw, channel,
&cckpowerlevel[0], &ofdmpowerlevel[0]);
@@ -615,8 +541,8 @@ bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
else
ofdmtxpwridx = 0;
RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
- ("%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
- power_indbm, ccktxpwridx, ofdmtxpwridx));
+ "%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
+ power_indbm, ccktxpwridx, ofdmtxpwridx);
for (idx = 0; idx < 14; idx++) {
for (rf_path = 0; rf_path < 2; rf_path++) {
rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
@@ -710,7 +636,7 @@ void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Unknown Scan Backup operation.\n"));
+ "Unknown Scan Backup operation\n");
break;
}
}
@@ -732,7 +658,7 @@ void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->phy_set_bw_mode_callback(hw);
} else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("FALSE driver sleep or unload\n"));
+ "FALSE driver sleep or unload\n");
rtlphy->set_bwmode_inprogress = false;
rtlphy->current_chan_bw = tmp_bw;
}
@@ -747,7 +673,7 @@ void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
u32 delay;
RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- ("switch to channel%d\n", rtlphy->current_channel));
+ "switch to channel%d\n", rtlphy->current_channel);
if (is_hal_stop(rtlhal))
return;
do {
@@ -765,7 +691,7 @@ void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
}
break;
} while (true);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
}
EXPORT_SYMBOL(rtl92c_phy_sw_chnl_callback);
@@ -780,19 +706,18 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
if (rtlphy->set_bwmode_inprogress)
return 0;
RT_ASSERT((rtlphy->current_channel <= 14),
- ("WIRELESS_MODE_G but channel>14"));
+ "WIRELESS_MODE_G but channel>14\n");
rtlphy->sw_chnl_inprogress = true;
rtlphy->sw_chnl_stage = 0;
rtlphy->sw_chnl_step = 0;
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl92c_phy_sw_chnl_callback(hw);
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- ("sw_chnl_inprogress false schdule workitem\n"));
+ "sw_chnl_inprogress false schdule workitem\n");
rtlphy->sw_chnl_inprogress = false;
} else {
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- ("sw_chnl_inprogress false driver sleep or"
- " unload\n"));
+ "sw_chnl_inprogress false driver sleep or unload\n");
rtlphy->sw_chnl_inprogress = false;
}
return 1;
@@ -807,7 +732,7 @@ static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
struct swchnlcmd *pcmd;
if (cmdtable == NULL) {
- RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
+ RT_ASSERT(false, "cmdtable cannot be NULL\n");
return false;
}
@@ -853,7 +778,7 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
rfdependcmdcnt = 0;
RT_ASSERT((channel >= 1 && channel <= 14),
- ("illegal channel for Zebra: %d\n", channel));
+ "invalid channel for Zebra: %d\n", channel);
_rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
@@ -916,7 +841,7 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
@@ -1920,23 +1845,23 @@ bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
bool postprocessing = false;
RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress));
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
do {
switch (iotype) {
case IO_CMD_RESUME_DM_BY_SCAN:
RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- ("[IO CMD] Resume DM after scan.\n"));
+ "[IO CMD] Resume DM after scan\n");
postprocessing = true;
break;
case IO_CMD_PAUSE_DM_BY_SCAN:
RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- ("[IO CMD] Pause DM before scan.\n"));
+ "[IO CMD] Pause DM before scan\n");
postprocessing = true;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
} while (false);
@@ -1947,7 +1872,7 @@ bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
return false;
}
rtl92c_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "<--IO Type(%#x)\n", iotype);
return true;
}
EXPORT_SYMBOL(rtl92c_phy_set_io_cmd);
@@ -1958,8 +1883,8 @@ void rtl92c_phy_set_io(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- ("--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress));
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
dm_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1;
@@ -1973,12 +1898,12 @@ void rtl92c_phy_set_io(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- ("<---(%#x)\n", rtlphy->current_io_type));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "<---(%#x)\n",
+ rtlphy->current_io_type);
}
EXPORT_SYMBOL(rtl92c_phy_set_io);
@@ -2018,7 +1943,7 @@ void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- ("Switch RF timeout !!!.\n"));
+ "Switch RF timeout !!!\n");
return;
}
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
index 9a264c0d6127..cec10d696492 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 9fc804d89d65..04c3aef8a4f6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
index 2df33e53e15a..27b3af880d96 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -54,7 +54,7 @@ void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- ("Not connected to any\n"));
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
@@ -67,28 +67,28 @@ void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
undecorated_smoothed_pwdb =
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("AP Client PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb));
+ "AP Client PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb);
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.undecorated_smoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("STA Default Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb));
+ "STA Default Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb);
}
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("AP Ext Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb));
+ "AP Ext Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb);
}
if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"));
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else if ((undecorated_smoothed_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
(undecorated_smoothed_pwdb >=
@@ -96,18 +96,18 @@ void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"));
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undecorated_smoothed_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("TXHIGHPWRLEVEL_NORMAL\n"));
+ "TXHIGHPWRLEVEL_NORMAL\n");
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("PHY_SetTxPowerLevel8192S() Channel = %d\n",
- rtlphy->current_channel));
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtlphy->current_channel);
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
index 07dd9552e82f..26747fa86005 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index a3deaefa788c..5c4d9bc040f1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -141,7 +141,7 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
}
@@ -207,7 +207,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 e_aci;
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("HW_VAR_SLOT_TIME %x\n", val[0]));
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
@@ -246,8 +246,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*val = min_spacing_to_set;
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg));
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -261,8 +261,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
mac->min_space_cfg |= (density_to_set << 3);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg));
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -310,8 +310,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset));
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
break;
}
@@ -348,8 +348,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("HW_VAR_ACM_CTRL acm set "
- "failed: eACI is %d\n", acm));
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -365,14 +365,14 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
}
RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- ("SetHwReg8190pci(): [HW_VAR_ACM_CTRL] "
- "Write 0x%X\n", acm_ctrl));
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
break;
}
@@ -507,8 +507,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case "
- "not process\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not processed\n");
break;
}
}
@@ -530,8 +530,8 @@ static bool _rtl92ce_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
if (count > POLLING_LLT_THRESHOLD) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Failed to polling write LLT done at "
- "address %d!\n", address));
+ "Failed to polling write LLT done at address %d!\n",
+ address);
status = false;
break;
}
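The message tidied up above lives inside a bounded polling loop: after issuing the LLT write, the code re-reads the status until the hardware reports completion or POLLING_LLT_THRESHOLD iterations have passed. The generic shape of that pattern, sketched with made-up register and bit names:

/* Poll a busy bit with an iteration cap instead of spinning forever. */
static bool poll_until_idle(struct rtl_priv *rtlpriv, u32 reg, u32 busy_bit,
			    unsigned int max_tries)
{
	unsigned int count;

	for (count = 0; count < max_tries; count++) {
		if (!(rtl_read_dword(rtlpriv, reg) & busy_bit))
			return true;	/* hardware finished in time */
		udelay(1);
	}
	return false;			/* timed out; caller logs the failure */
}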
@@ -669,18 +669,15 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
udelay(2);
retry = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("reg0xec:%x:%x\n",
- rtl_read_dword(rtlpriv, 0xEC),
- bytetmp));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "reg0xec:%x:%x\n",
+ rtl_read_dword(rtlpriv, 0xEC), bytetmp);
while ((bytetmp & BIT(0)) && retry < 1000) {
retry++;
udelay(50);
bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("reg0xec:%x:%x\n",
- rtl_read_dword(rtlpriv,
- 0xEC),
- bytetmp));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "reg0xec:%x:%x\n",
+ rtl_read_dword(rtlpriv, 0xEC), bytetmp);
udelay(50);
}
@@ -696,7 +693,7 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, REG_CR, 0x2ff);
- if (_rtl92ce_llt_table_init(hw) == false)
+ if (!_rtl92ce_llt_table_init(hw))
return false;
rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
@@ -864,13 +861,13 @@ void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw)
u8 sec_reg_value;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm));
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("not open "
- "hw encryption\n"));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
@@ -886,7 +883,7 @@ void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("The SECR-value %x\n", sec_reg_value));
+ "The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
@@ -909,8 +906,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
rtlpci->being_init_adapter = true;
rtlpriv->intf_ops->disable_aspm(hw);
rtstatus = _rtl92ce_init_mac(hw);
- if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Init MAC failed\n"));
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
err = 1;
return err;
}
@@ -918,13 +915,9 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
err = rtl92c_download_fw(hw);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("Failed to download FW. Init HW "
- "without FW now..\n"));
+ "Failed to download FW. Init HW without FW now..\n");
err = 1;
- rtlhal->fw_ready = false;
return err;
- } else {
- rtlhal->fw_ready = true;
}
rtlhal->last_hmeboxnum = 0;
@@ -968,12 +961,12 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
tmp_u1b = efuse_read_1byte(hw, 0x1FA);
if (!(tmp_u1b & BIT(0))) {
rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("PA BIAS path A\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n");
}
if (!(tmp_u1b & BIT(1)) && is92c) {
rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0F, 0x05);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("PA BIAS path B\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path B\n");
}
if (!(tmp_u1b & BIT(4))) {
@@ -982,7 +975,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
udelay(10);
rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("under 1.5V\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
}
rtl92c_dm_init(hw);
rtlpci->being_init_adapter = false;
@@ -995,6 +988,7 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
enum version_8192c version = VERSION_UNKNOWN;
u32 value32;
+ const char *versionid;
value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
if (value32 & TRP_VAUX_EN) {
@@ -1007,27 +1001,25 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
switch (version) {
case VERSION_B_CHIP_92C:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_B_CHIP_92C.\n"));
+ versionid = "B_CHIP_92C";
break;
case VERSION_B_CHIP_88C:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_B_CHIP_88C.\n"));
+ versionid = "B_CHIP_88C";
break;
case VERSION_A_CHIP_92C:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_A_CHIP_92C.\n"));
+ versionid = "A_CHIP_92C";
break;
case VERSION_A_CHIP_88C:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_A_CHIP_88C.\n"));
+ versionid = "A_CHIP_88C";
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Chip Version ID: Unknown. Bug?\n"));
+ versionid = "Unknown. Bug?";
break;
}
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Chip Version ID: %s\n", versionid);
+
switch (version & 0x3) {
case CHIP_88C:
rtlphy->rf_type = RF_1T1R;
@@ -1041,13 +1033,12 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
default:
rtlphy->rf_type = RF_1T1R;
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("ERROR RF_Type is set!!"));
+ "ERROR RF_Type is set!!\n");
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
- "RF_2T2R" : "RF_1T1R"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Chip RF Type: %s\n",
+ rtlphy->rf_type == RF_2T2R ? "RF_2T2R" : "RF_1T1R");
return version;
}
@@ -1069,8 +1060,8 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
_rtl92ce_disable_bcn_sub_func(hw);
} else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("Set HW_VAR_MEDIA_STATUS: "
- "No such media status(%x).\n", type));
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x)\n",
+ type);
}
switch (type) {
@@ -1078,27 +1069,27 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
bt_msr |= MSR_NOLINK;
ledaction = LED_CTL_LINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Set Network type to NO LINK!\n"));
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
bt_msr |= MSR_ADHOC;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Set Network type to Ad Hoc!\n"));
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
bt_msr |= MSR_INFRA;
ledaction = LED_CTL_LINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Set Network type to STA!\n"));
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
bt_msr |= MSR_AP;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Set Network type to AP!\n"));
+ "Set Network type to AP!\n");
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Network type %d not support!\n", type));
+ "Network type %d not supported!\n", type);
return 1;
break;
@@ -1126,7 +1117,7 @@ void rtl92ce_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
(u8 *) (&reg_rcr));
_rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4));
- } else if (check_bssid == false) {
+ } else if (!check_bssid) {
reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
_rtl92ce_set_bcn_ctrl_reg(hw, BIT(4), 0);
rtlpriv->cfg->ops->set_hw_reg(hw,
@@ -1171,7 +1162,7 @@ void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci)
rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
break;
default:
- RT_ASSERT(false, ("invalid aci: %d !\n", aci));
+ RT_ASSERT(false, "invalid aci: %d !\n", aci);
break;
}
}
@@ -1199,7 +1190,6 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 u1b_tmp;
u32 u4b_tmp;
@@ -1210,7 +1200,7 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE0);
- if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->fw_ready)
+ if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7))
rtl92c_firmware_selfreset(hw);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x51);
rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
@@ -1300,7 +1290,7 @@ void rtl92ce_set_beacon_interval(struct ieee80211_hw *hw)
u16 bcn_interval = mac->beacon_interval;
RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- ("beacon_interval:%d\n", bcn_interval));
+ "beacon_interval:%d\n", bcn_interval);
rtl92ce_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
rtl92ce_enable_interrupt(hw);
@@ -1312,8 +1302,8 @@ void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- ("add_msr:%x, rm_msr:%x\n", add_msr, rm_msr));
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n",
+ add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
@@ -1367,25 +1357,24 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
for (rf_path = 0; rf_path < 2; rf_path++)
for (i = 0; i < 3; i++)
RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
- ("RF(%d) EEPROM CCK Area(%d) = 0x%x\n", rf_path,
- i,
- rtlefuse->
- eeprom_chnlarea_txpwr_cck[rf_path][i]));
+ "RF(%d) EEPROM CCK Area(%d) = 0x%x\n",
+ rf_path, i,
+ rtlefuse->
+ eeprom_chnlarea_txpwr_cck[rf_path][i]);
for (rf_path = 0; rf_path < 2; rf_path++)
for (i = 0; i < 3; i++)
RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
- ("RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
- rf_path, i,
- rtlefuse->
- eeprom_chnlarea_txpwr_ht40_1s[rf_path][i]));
+ "RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
+ rf_path, i,
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][i]);
for (rf_path = 0; rf_path < 2; rf_path++)
for (i = 0; i < 3; i++)
RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
- ("RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
- rf_path, i,
- rtlefuse->
- eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
- [i]));
+ "RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
+ rf_path, i,
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][i]);
for (rf_path = 0; rf_path < 2; rf_path++) {
for (i = 0; i < 14; i++) {
@@ -1416,11 +1405,11 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
for (i = 0; i < 14; i++) {
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = "
- "[0x%x / 0x%x / 0x%x]\n", rf_path, i,
- rtlefuse->txpwrlevel_cck[rf_path][i],
- rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
- rtlefuse->txpwrlevel_ht40_2s[rf_path][i]));
+ "RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = [0x%x / 0x%x / 0x%x]\n",
+ rf_path, i,
+ rtlefuse->txpwrlevel_cck[rf_path][i],
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][i]);
}
}
@@ -1457,13 +1446,13 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
}
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-%d pwrgroup_ht20[%d] = 0x%x\n",
- rf_path, i,
- rtlefuse->pwrgroup_ht20[rf_path][i]));
+ "RF-%d pwrgroup_ht20[%d] = 0x%x\n",
+ rf_path, i,
+ rtlefuse->pwrgroup_ht20[rf_path][i]);
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-%d pwrgroup_ht40[%d] = 0x%x\n",
- rf_path, i,
- rtlefuse->pwrgroup_ht40[rf_path][i]));
+ "RF-%d pwrgroup_ht40[%d] = 0x%x\n",
+ rf_path, i,
+ rtlefuse->pwrgroup_ht40[rf_path][i]);
}
}
@@ -1502,27 +1491,27 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
for (i = 0; i < 14; i++)
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i,
- rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]));
+ "RF-A Ht20 to HT40 Diff[%d] = 0x%x\n",
+ i, rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]);
for (i = 0; i < 14; i++)
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i,
- rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]));
+ "RF-A Legacy to Ht40 Diff[%d] = 0x%x\n",
+ i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]);
for (i = 0; i < 14; i++)
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i,
- rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]));
+ "RF-B Ht20 to HT40 Diff[%d] = 0x%x\n",
+ i, rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]);
for (i = 0; i < 14; i++)
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i,
- rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]));
+ "RF-B Legacy to HT40 Diff[%d] = 0x%x\n",
+ i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]);
if (!autoload_fail)
rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7);
else
rtlefuse->eeprom_regulatory = 0;
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory));
+ "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
if (!autoload_fail) {
rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A];
@@ -1531,10 +1520,9 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI;
rtlefuse->eeprom_tssi[RF90_PATH_B] = EEPROM_DEFAULT_TSSI;
}
- RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("TSSI_A = 0x%x, TSSI_B = 0x%x\n",
- rtlefuse->eeprom_tssi[RF90_PATH_A],
- rtlefuse->eeprom_tssi[RF90_PATH_B]));
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower, "TSSI_A = 0x%x, TSSI_B = 0x%x\n",
+ rtlefuse->eeprom_tssi[RF90_PATH_A],
+ rtlefuse->eeprom_tssi[RF90_PATH_B]);
if (!autoload_fail)
tempval = hwinfo[EEPROM_THERMAL_METER];
@@ -1547,7 +1535,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
+ "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
}
static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
@@ -1567,19 +1555,19 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
HWSET_MAX_SIZE);
} else if (rtlefuse->epromtype == EEPROM_93C46) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("RTL819X Not boot from eeprom, check it !!"));
+ "RTL819X Not boot from eeprom, check it !!");
}
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"),
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP",
hwinfo, HWSET_MAX_SIZE);
eeprom_id = *((u16 *)&hwinfo[0]);
if (eeprom_id != RTL8190_EEPROM_ID) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
+ "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
rtlefuse->autoload_failflag = true;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
}
@@ -1591,8 +1579,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
*((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("%pM\n", rtlefuse->dev_addr));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
_rtl92ce_read_txpower_info_from_hwpg(hw,
rtlefuse->autoload_failflag,
@@ -1608,7 +1595,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
+ "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
/* set channel paln to world wide 13 */
rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
@@ -1662,7 +1649,7 @@ static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
break;
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("RT Customized ID: 0x%02X\n", rtlhal->oem_id));
+ "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
}
void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
@@ -1679,22 +1666,22 @@ void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
else
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
- rtlhal->version));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from EEPROM\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from EFUSE\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl92ce_read_adapter_info(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Autoload ERR!!\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
}
_rtl92ce_hal_customized_behavior(hw);
}
@@ -1790,8 +1777,8 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- ("%x\n", rtl_read_dword(rtlpriv, REG_ARFR0)));
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
+ rtl_read_dword(rtlpriv, REG_ARFR0));
}
static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
@@ -1919,16 +1906,15 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
break;
}
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- ("ratr_bitmap :%x\n", ratr_bitmap));
+ "ratr_bitmap :%x\n", ratr_bitmap);
*(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) |
(ratr_index << 28));
rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
- "ratr_val:%x, %x:%x:%x:%x:%x\n",
- ratr_index, ratr_bitmap,
- rate_mask[0], rate_mask[1],
- rate_mask[2], rate_mask[3],
- rate_mask[4]));
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1], rate_mask[2], rate_mask[3],
+ rate_mask[4]);
rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
if (macid != 0)
@@ -1994,15 +1980,14 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("GPIOChangeRF - HW Radio ON, RF ON\n"));
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
e_rfpowerstate_toset = ERFON;
ppsc->hwradiooff = false;
actuallyset = true;
- } else if ((ppsc->hwradiooff == false)
- && (e_rfpowerstate_toset == ERFOFF)) {
+ } else if (!ppsc->hwradiooff && (e_rfpowerstate_toset == ERFOFF)) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("GPIOChangeRF - HW Radio OFF, RF OFF\n"));
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
e_rfpowerstate_toset = ERFOFF;
ppsc->hwradiooff = true;
@@ -2053,7 +2038,7 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("clear_all\n"));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
@@ -2081,8 +2066,8 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
enc_algo = CAM_AES;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case "
- "not process\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not processed\n");
enc_algo = CAM_TKIP;
break;
}
@@ -2100,9 +2085,8 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
p_macaddr);
if (entry_id >= TOTAL_CAM_ENTRY) {
RT_TRACE(rtlpriv, COMP_SEC,
- DBG_EMERG,
- ("Can not find free hw"
- " security cam entry\n"));
+ DBG_EMERG,
+ "Can not find free hw security cam entry\n");
return;
}
} else {
@@ -2116,31 +2100,31 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
if (rtlpriv->sec.key_len[key_index] == 0) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("delete one entry, entry_id is %d\n",
- entry_id));
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("The insert KEY length is %d\n",
- rtlpriv->sec.key_len[PAIRWISE_KEYIDX]));
+ "The insert KEY length is %d\n",
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("The insert KEY is %x %x\n",
- rtlpriv->sec.key_buf[0][0],
- rtlpriv->sec.key_buf[0][1]));
+ "The insert KEY is %x %x\n",
+ rtlpriv->sec.key_buf[0][0],
+ rtlpriv->sec.key_buf[0][1]);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("add one entry\n"));
+ "add one entry\n");
if (is_pairwise) {
RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
- "Pairwiase Key content :",
+ "Pairwise Key content",
rtlpriv->sec.pairwise_key,
rtlpriv->sec.
key_len[PAIRWISE_KEYIDX]);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("set Pairwiase key\n"));
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
@@ -2149,7 +2133,7 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
key_buf[key_index]);
} else {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("set group key\n"));
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
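Earlier in this hw.c diff, the _rtl92ce_read_chip_version() hunk folds the per-case RT_TRACE calls into a single trace that prints a version string chosen inside the switch. A minimal standalone sketch of that shape, using made-up names rather than the driver's enum values:

#include <stdio.h>

enum chip_version { CHIP_A_88C, CHIP_A_92C, CHIP_B_88C, CHIP_B_92C };

/* Each case only picks a string; one print site handles the output. */
static const char *version_name(enum chip_version v)
{
	switch (v) {
	case CHIP_B_92C: return "B_CHIP_92C";
	case CHIP_B_88C: return "B_CHIP_88C";
	case CHIP_A_92C: return "A_CHIP_92C";
	case CHIP_A_88C: return "A_CHIP_88C";
	default:         return "Unknown. Bug?";
	}
}

int main(void)
{
	printf("Chip Version ID: %s\n", version_name(CHIP_B_92C));
	return 0;
}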
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
index 07dbe3e340a5..52a3aea9b3de 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
index 28a1a707d09c..8283e9b27639 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -45,8 +45,8 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
@@ -62,7 +62,7 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
pled->ledon = true;
@@ -74,8 +74,8 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
@@ -97,7 +97,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
pled->ledon = false;
@@ -145,7 +145,7 @@ void rtl92ce_led_control(struct ieee80211_hw *hw,
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d.\n",
- ledaction));
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n",
+ ledaction);
_rtl92ce_sw_led_control(hw, ledaction);
}
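Almost every hunk in this series drops the extra parentheses that used to wrap the RT_TRACE/RTPRINT/RT_ASSERT argument list; that only works once the macro itself is variadic and forwards a printf-style format string plus arguments. A minimal userspace sketch of such a macro, assuming a plain printf back end rather than the real rtlwifi implementation:

#include <stdio.h>

#define DBG_LOUD 1

static int debug_level = DBG_LOUD;

/* Variadic trace macro: callers write MY_TRACE(level, "fmt", args...) with
 * no extra parentheses, so printf-style format checking can see the args. */
#define MY_TRACE(level, fmt, ...)					\
	do {								\
		if ((level) <= debug_level)				\
			printf(fmt, ##__VA_ARGS__);			\
	} while (0)

int main(void)
{
	int ledpin = 3;

	MY_TRACE(DBG_LOUD, "LedAddr:%X ledpin=%d\n", 0x4c, ledpin);
	return 0;
}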
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.h b/drivers/net/wireless/rtlwifi/rtl8192ce/led.h
index 7dfccea2095b..c5761066d383 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/led.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 3b585aadabfc..88deae67cc14 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -47,9 +47,9 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
u32 original_value, readback_value, bitshift;
struct rtl_phy *rtlphy = &(rtlpriv->phy);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
- "rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -67,9 +67,8 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- ("regaddr(%#x), rfpath(%#x), "
- "bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value));
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -121,8 +120,8 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
u32 original_value, bitshift;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath));
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -153,10 +152,9 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
- "bitmask(%#x), data(%#x), "
- "rfpath(%#x)\n", regaddr,
- bitmask, data, rfpath));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
}
static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
@@ -166,11 +164,10 @@ static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Read Rtl819XMACPHY_Array\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n");
arraylength = MAC_2T_ARRAYLENGTH;
ptrarray = RTL8192CEMAC_2T_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Img:RTL8192CEMAC_2T_ARRAY\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Img:RTL8192CEMAC_2T_ARRAY\n");
for (i = 0; i < arraylength; i = i + 2)
rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
return true;
@@ -215,10 +212,9 @@ bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
phy_regarray_table[i + 1]);
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("The phy_regarray_table[0] is %x"
- " Rtl819XPHY_REGArray[1] is %x\n",
- phy_regarray_table[i],
- phy_regarray_table[i + 1]));
+ "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]);
}
} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
for (i = 0; i < agctab_arraylen; i = i + 2) {
@@ -226,10 +222,9 @@ bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
agctab_array_table[i + 1]);
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("The agctab_array_table[0] is "
- "%x Rtl819XPHY_REGArray[1] is %x\n",
- agctab_array_table[i],
- agctab_array_table[i + 1]));
+ "The agctab_array_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]);
}
}
return true;
@@ -269,7 +264,7 @@ bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
} else {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- ("configtype != BaseBand_Config_PHY_REG\n"));
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -291,20 +286,20 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
radiob_arraylen = RADIOB_2TARRAYLENGTH;
radiob_array_table = RTL8192CE_RADIOB_2TARRAY;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Radio_A:RTL8192CERADIOA_2TARRAY\n"));
+ "Radio_A:RTL8192CERADIOA_2TARRAY\n");
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Radio_B:RTL8192CE_RADIOB_2TARRAY\n"));
+ "Radio_B:RTL8192CE_RADIOB_2TARRAY\n");
} else {
radioa_arraylen = RADIOA_1TARRAYLENGTH;
radioa_array_table = RTL8192CE_RADIOA_1TARRAY;
radiob_arraylen = RADIOB_1TARRAYLENGTH;
radiob_array_table = RTL8192CE_RADIOB_1TARRAY;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Radio_A:RTL8192CE_RADIOA_1TARRAY\n"));
+ "Radio_A:RTL8192CE_RADIOA_1TARRAY\n");
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Radio_B:RTL8192CE_RADIOB_1TARRAY\n"));
+ "Radio_B:RTL8192CE_RADIOB_1TARRAY\n");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Radio No %x\n", rfpath));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
@@ -352,11 +347,11 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
break;
case RF90_PATH_C:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
return true;
@@ -371,10 +366,9 @@ void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- ("Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz"))
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (is_hal_stop(rtlhal)) {
rtlphy->set_bwmode_inprogress = false;
@@ -398,7 +392,7 @@ void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
break;
}
@@ -423,12 +417,12 @@ void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
break;
}
rtl92ce_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
}
void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
@@ -499,7 +493,7 @@ static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- ("Switch RF timeout !!!.\n"));
+ "Switch RF timeout !!!\n");
return;
}
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
@@ -526,18 +520,17 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
do {
InitializeCount++;
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("IPS Set eRf nic enable\n"));
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
- } while ((rtstatus != true)
- && (InitializeCount < 10));
+ } while (!rtstatus && (InitializeCount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("Set ERFON sleeped:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->
- last_sleep_jiffies)));
+ "Set ERFON sleeped:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->
+ last_sleep_jiffies));
ppsc->last_awake_jiffies = jiffies;
rtl92ce_phy_set_rf_on(hw);
}
@@ -553,7 +546,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
case ERFOFF:{
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("IPS Set eRf nic disable\n"));
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -578,35 +571,33 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
continue;
} else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("eRf Off/Sleep: %d times "
- "TcbBusyQueue[%d] =%d before "
- "doze!\n", (i + 1), queue_id,
- skb_queue_len(&ring->queue)));
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ i + 1, queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("\n ERFSLEEP: %d times "
- "TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue)));
+ "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("Set ERFSLEEP awaked:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies)));
+ "Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
_rtl92ce_phy_set_rf_sleep(hw);
break;
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
bresult = false;
break;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
index be2c92adef33..d5e3b704f930 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
index ba5ff0411f0a..e4d738f6166d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -1190,7 +1190,6 @@
#define USB_AGG_EN BIT(3)
-#define MAC_ADDR_LEN 6
#define LAST_ENTRY_OF_TX_PKT_BUFFER 255
#define POLLING_LLT_THRESHOLD 20
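The reg.h hunk above removes the driver-private MAC_ADDR_LEN define; the kernel-wide ETH_ALEN constant (6, from <linux/if_ether.h>) already covers that purpose. A small userspace illustration of relying on the shared constant instead of a local one (the address bytes are arbitrary):

#include <stdio.h>
#include <linux/if_ether.h>	/* ETH_ALEN == 6 */

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0xce };

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}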
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
index d3b01e6023ba..54c7614958a8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -56,7 +56,7 @@ void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("unknown bandwidth: %#X\n", bandwidth));
+ "unknown bandwidth: %#X\n", bandwidth);
break;
}
}
@@ -123,8 +123,8 @@ void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
- RTXAGC_A_CCK1_MCS32));
+ "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_A_CCK1_MCS32);
tmpval = tx_agc[RF90_PATH_A] >> 8;
@@ -133,22 +133,22 @@ void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
- RTXAGC_B_CCK11_A_CCK2_11));
+ "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_B_CCK11_A_CCK2_11);
tmpval = tx_agc[RF90_PATH_B] >> 24;
rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
- RTXAGC_B_CCK11_A_CCK2_11));
+ "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_B_CCK11_A_CCK2_11);
tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
- RTXAGC_B_CCK1_55_MCS32));
+ "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_B_CCK1_55_MCS32);
}
static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
@@ -171,8 +171,8 @@ static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
(powerBase0 << 8) | powerBase0;
*(ofdmbase + i) = powerBase0;
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- (" [OFDM power base index rf(%c) = 0x%x]\n",
- ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)));
+ " [OFDM power base index rf(%c) = 0x%x]\n",
+ i == 0 ? 'A' : 'B', *(ofdmbase + i));
}
for (i = 0; i < 2; i++) {
@@ -187,8 +187,8 @@ static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
*(mcsbase + i) = powerBase1;
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- (" [MCS power base index rf(%c) = 0x%x]\n",
- ((i == 0) ? 'A' : 'B'), *(mcsbase + i)));
+ " [MCS power base index rf(%c) = 0x%x]\n",
+ i == 0 ? 'A' : 'B', *(mcsbase + i));
}
}
@@ -215,9 +215,8 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("RTK better performance, "
- "writeVal(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), writeVal));
+ "RTK better performance, writeVal(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeVal);
break;
case 1:
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
@@ -225,9 +224,8 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
powerBase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("Realtek regulatory, 40MHz, "
- "writeVal(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), writeVal));
+ "Realtek regulatory, 40MHz, writeVal(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeVal);
} else {
if (rtlphy->pwrgroup_cnt == 1)
chnlgroup = 0;
@@ -249,9 +247,8 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
powerBase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("Realtek regulatory, 20MHz, "
- "writeVal(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), writeVal));
+ "Realtek regulatory, 20MHz, writeVal(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeVal);
}
break;
case 2:
@@ -259,27 +256,24 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
((index < 2) ? powerBase0[rf] : powerBase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("Better regulatory, "
- "writeVal(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), writeVal));
+ "Better regulatory, writeVal(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeVal);
break;
case 3:
chnlgroup = 0;
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("customer's limit, 40MHz "
- "rf(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'),
- rtlefuse->pwrgroup_ht40[rf][channel -
- 1]));
+ "customer's limit, 40MHz rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B',
+ rtlefuse->pwrgroup_ht40[rf][channel -
+ 1]);
} else {
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("customer's limit, 20MHz "
- "rf(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'),
- rtlefuse->pwrgroup_ht20[rf][channel -
- 1]));
+ "customer's limit, 20MHz rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B',
+ rtlefuse->pwrgroup_ht20[rf][channel -
+ 1]);
}
for (i = 0; i < 4; i++) {
pwr_diff_limit[i] =
@@ -311,15 +305,15 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
(pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("Customer's limit rf(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), customer_limit));
+ "Customer's limit rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', customer_limit);
writeVal = customer_limit +
((index < 2) ? powerBase0[rf] : powerBase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("Customer, writeVal rf(%c)= 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), writeVal));
+ "Customer, writeVal rf(%c)= 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeVal);
break;
default:
chnlgroup = 0;
@@ -329,9 +323,8 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("RTK better performance, writeVal "
- "rf(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), writeVal));
+ "RTK better performance, writeVal rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeVal);
break;
}
@@ -383,7 +376,7 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("Set 0x%x = %08x\n", regoffset, writeVal));
+ "Set 0x%x = %08x\n", regoffset, writeVal);
if (((get_rf_type(rtlphy) == RF_2T2R) &&
(regoffset == RTXAGC_A_MCS15_MCS12 ||
@@ -510,14 +503,14 @@ static bool _rtl92ce_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
break;
}
- if (rtstatus != true) {
+ if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Radio[%d] Fail!!", rfpath));
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("<---\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
return rtstatus;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
index 39ff03685986..6c8d56efceae 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 89ef6982ce50..2c3b73366cd2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,9 +27,6 @@
*
*****************************************************************************/
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-
#include "../wifi.h"
#include "../core.h"
#include "../pci.h"
@@ -43,6 +40,8 @@
#include "trx.h"
#include "led.h"
+#include <linux/module.h>
+
static void rtl92c_init_aspm_vars(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -92,9 +91,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
int err;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- const struct firmware *firmware;
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- char *fw_name = NULL;
rtl8192ce_bt_reg_init(hw);
@@ -159,33 +156,27 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
if (!rtlpriv->rtlhal.pfirmware) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Can't alloc buffer for fw.\n"));
+ "Can't alloc buffer for fw\n");
return 1;
}
/* request fw */
if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
!IS_92C_SERIAL(rtlhal->version))
- fw_name = "rtlwifi/rtl8192cfwU.bin";
+ rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin";
else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
- fw_name = "rtlwifi/rtl8192cfwU_B.bin";
- else
- fw_name = rtlpriv->cfg->fw_name;
- err = request_firmware(&firmware, fw_name, rtlpriv->io.dev);
+ rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+
+ rtlpriv->max_fw_size = 0x4000;
+ pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ rtlpriv->io.dev, GFP_KERNEL, hw,
+ rtl_fw_cb);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Failed to request firmware!\n"));
- return 1;
- }
- if (firmware->size > 0x4000) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Firmware is too big!\n"));
- release_firmware(firmware);
+ "Failed to request firmware!\n");
return 1;
}
- memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
- rtlpriv->rtlhal.fwsize = firmware->size;
- release_firmware(firmware);
return 0;
}
@@ -404,7 +395,7 @@ static int __init rtl92ce_module_init(void)
ret = pci_register_driver(&rtl92ce_driver);
if (ret)
- RT_ASSERT(false, (": No device found\n"));
+ RT_ASSERT(false, "No device found\n");
return ret;
}
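The sw.c hunk above switches rtl92c_init_sw_vars() from a blocking request_firmware() call to request_firmware_nowait(), which returns immediately and hands the image to a completion callback (rtl_fw_cb in the driver). A hedged sketch of that pattern as kernel-module code; struct my_ctx, my_fw_callback and my_load_fw are made-up stand-ins, not rtlwifi symbols:

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/slab.h>

struct my_ctx {
	void *fw_buf;		/* pre-allocated buffer, e.g. vzalloc(0x4000) */
	size_t max_fw_size;	/* size limit enforced in the callback */
	size_t fw_size;
};

/* Runs asynchronously once userspace has supplied (or failed to supply)
 * the firmware image. */
static void my_fw_callback(const struct firmware *fw, void *context)
{
	struct my_ctx *ctx = context;

	if (!fw || fw->size > ctx->max_fw_size) {
		pr_err("firmware missing or too big\n");
	} else {
		memcpy(ctx->fw_buf, fw->data, fw->size);
		ctx->fw_size = fw->size;
	}
	release_firmware(fw);	/* release_firmware(NULL) is a no-op */
}

static int my_load_fw(struct device *dev, struct my_ctx *ctx, const char *name)
{
	/* uevent=1: ask userspace to locate and load the named image. */
	return request_firmware_nowait(THIS_MODULE, 1, name, dev,
				       GFP_KERNEL, ctx, my_fw_callback);
}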
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
index b7dc3263e433..d2367a5d0cf5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/table.c b/drivers/net/wireless/rtlwifi/rtl8192ce/table.c
index ba938b91aa6f..752f943a84ae 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/table.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/table.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/table.h b/drivers/net/wireless/rtlwifi/rtl8192ce/table.h
index 3a6e8b6aeee0..8b79161f71be 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/table.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/table.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 4fb5ae24dee0..37b13636a778 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -725,7 +725,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- ("Enable RDG function.\n"));
+ "Enable RDG function\n");
SET_TX_DESC_RDG_ENABLE(pdesc, 1);
SET_TX_DESC_HTC(pdesc, 1);
}
@@ -763,7 +763,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_BMC(pdesc, 1);
}
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, ("\n"));
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
@@ -821,8 +821,7 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
}
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C Tx Cmd Content\n",
- pdesc, TX_DESC_SIZE);
+ "H2C Tx Cmd Content", pdesc, TX_DESC_SIZE);
}
void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
@@ -837,8 +836,8 @@ void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
break;
default:
- RT_ASSERT(false, ("ERR txdesc :%d"
- " not process\n", desc_name));
+ RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ desc_name);
break;
}
} else {
@@ -857,8 +856,8 @@ void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
SET_RX_DESC_EOR(pdesc, 1);
break;
default:
- RT_ASSERT(false, ("ERR rxdesc :%d "
- "not process\n", desc_name));
+ RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ desc_name);
break;
}
}
@@ -877,8 +876,8 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name)
ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc);
break;
default:
- RT_ASSERT(false, ("ERR txdesc :%d "
- "not process\n", desc_name));
+ RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ desc_name);
break;
}
} else {
@@ -891,8 +890,8 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name)
ret = GET_RX_DESC_PKT_LEN(pdesc);
break;
default:
- RT_ASSERT(false, ("ERR rxdesc :%d "
- "not process\n", desc_name));
+ RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ desc_name);
break;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index c8977a50ca36..efb9ab270403 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
index d097efb1e717..f916555e6311 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
index f311baee668d..6fd39eaf361e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -52,7 +52,7 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- ("Not connected to any\n"));
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
@@ -65,28 +65,28 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
undecorated_smoothed_pwdb =
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("AP Client PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb));
+ "AP Client PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb);
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.undecorated_smoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("STA Default Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb));
+ "STA Default Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb);
}
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("AP Ext Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb));
+ "AP Ext Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb);
}
if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"));
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else if ((undecorated_smoothed_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
(undecorated_smoothed_pwdb >=
@@ -94,18 +94,18 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"));
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undecorated_smoothed_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("TXHIGHPWRLEVEL_NORMAL\n"));
+ "TXHIGHPWRLEVEL_NORMAL\n");
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("PHY_SetTxPowerLevel8192S() Channel = %d\n",
- rtlphy->current_channel));
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtlphy->current_channel);
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
index 7f966c666b5a..d947e7d350bb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 124cf633861c..0c74d4f2eeb4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,8 +27,6 @@
*
*****************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include "../wifi.h"
#include "../efuse.h"
#include "../base.h"
@@ -162,24 +160,24 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
for (rf_path = 0; rf_path < 2; rf_path++)
for (i = 0; i < 3; i++)
RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
- ("RF(%d) EEPROM CCK Area(%d) = 0x%x\n", rf_path,
- i, rtlefuse->
- eeprom_chnlarea_txpwr_cck[rf_path][i]));
+ "RF(%d) EEPROM CCK Area(%d) = 0x%x\n",
+ rf_path, i,
+ rtlefuse->
+ eeprom_chnlarea_txpwr_cck[rf_path][i]);
for (rf_path = 0; rf_path < 2; rf_path++)
for (i = 0; i < 3; i++)
RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
- ("RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
- rf_path, i,
- rtlefuse->
- eeprom_chnlarea_txpwr_ht40_1s[rf_path][i]));
+ "RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
+ rf_path, i,
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][i]);
for (rf_path = 0; rf_path < 2; rf_path++)
for (i = 0; i < 3; i++)
RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
- ("RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
- rf_path, i,
- rtlefuse->
- eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
- [i]));
+ "RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
+ rf_path, i,
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][i]);
for (rf_path = 0; rf_path < 2; rf_path++) {
for (i = 0; i < 14; i++) {
index = _rtl92c_get_chnl_group((u8) i);
@@ -205,11 +203,10 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
}
for (i = 0; i < 14; i++) {
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = "
- "[0x%x / 0x%x / 0x%x]\n", rf_path, i,
- rtlefuse->txpwrlevel_cck[rf_path][i],
- rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
- rtlefuse->txpwrlevel_ht40_2s[rf_path][i]));
+ "RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = [0x%x / 0x%x / 0x%x]\n", rf_path, i,
+ rtlefuse->txpwrlevel_cck[rf_path][i],
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][i]);
}
}
for (i = 0; i < 3; i++) {
@@ -242,13 +239,13 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
& 0xf0) >> 4);
}
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-%d pwrgroup_ht20[%d] = 0x%x\n",
- rf_path, i,
- rtlefuse->pwrgroup_ht20[rf_path][i]));
+ "RF-%d pwrgroup_ht20[%d] = 0x%x\n",
+ rf_path, i,
+ rtlefuse->pwrgroup_ht20[rf_path][i]);
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-%d pwrgroup_ht40[%d] = 0x%x\n",
- rf_path, i,
- rtlefuse->pwrgroup_ht40[rf_path][i]));
+ "RF-%d pwrgroup_ht40[%d] = 0x%x\n",
+ rf_path, i,
+ rtlefuse->pwrgroup_ht40[rf_path][i]);
}
}
for (i = 0; i < 14; i++) {
@@ -277,26 +274,26 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7];
for (i = 0; i < 14; i++)
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i,
- rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]));
+ "RF-A Ht20 to HT40 Diff[%d] = 0x%x\n",
+ i, rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]);
for (i = 0; i < 14; i++)
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i,
- rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]));
+ "RF-A Legacy to Ht40 Diff[%d] = 0x%x\n",
+ i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]);
for (i = 0; i < 14; i++)
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i,
- rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]));
+ "RF-B Ht20 to HT40 Diff[%d] = 0x%x\n",
+ i, rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]);
for (i = 0; i < 14; i++)
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i,
- rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]));
+ "RF-B Legacy to HT40 Diff[%d] = 0x%x\n",
+ i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]);
if (!autoload_fail)
rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7);
else
rtlefuse->eeprom_regulatory = 0;
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory));
+ "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
if (!autoload_fail) {
rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A];
rtlefuse->eeprom_tssi[RF90_PATH_B] = hwinfo[EEPROM_TSSI_B];
@@ -305,9 +302,9 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
rtlefuse->eeprom_tssi[RF90_PATH_B] = EEPROM_DEFAULT_TSSI;
}
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("TSSI_A = 0x%x, TSSI_B = 0x%x\n",
- rtlefuse->eeprom_tssi[RF90_PATH_A],
- rtlefuse->eeprom_tssi[RF90_PATH_B]));
+ "TSSI_A = 0x%x, TSSI_B = 0x%x\n",
+ rtlefuse->eeprom_tssi[RF90_PATH_A],
+ rtlefuse->eeprom_tssi[RF90_PATH_B]);
if (!autoload_fail)
tempval = hwinfo[EEPROM_THERMAL_METER];
else
@@ -320,7 +317,7 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
rtlefuse->apk_thermalmeterignore = true;
rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
+ "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
}
static void _rtl92cu_read_board_type(struct ieee80211_hw *hw, u8 *contents)
@@ -340,144 +337,8 @@ static void _rtl92cu_read_board_type(struct ieee80211_hw *hw, u8 *contents)
if (IS_HIGHT_PA(rtlefuse->board_type))
rtlefuse->external_pa = 1;
pr_info("Board Type %x\n", rtlefuse->board_type);
-
-#ifdef CONFIG_ANTENNA_DIVERSITY
- /* Antenna Diversity setting. */
- if (registry_par->antdiv_cfg == 2) /* 2: From Efuse */
- rtl_efuse->antenna_cfg = (contents[EEPROM_RF_OPT1]&0x18)>>3;
- else
- rtl_efuse->antenna_cfg = registry_par->antdiv_cfg; /* 0:OFF, */
-
- pr_info("Antenna Config %x\n", rtl_efuse->antenna_cfg);
-#endif
-}
-
-#ifdef CONFIG_BT_COEXIST
-static void _update_bt_param(_adapter *padapter)
-{
- struct btcoexist_priv *pbtpriv = &(padapter->halpriv.bt_coexist);
- struct registry_priv *registry_par = &padapter->registrypriv;
- if (2 != registry_par->bt_iso) {
- /* 0:Low, 1:High, 2:From Efuse */
- pbtpriv->BT_Ant_isolation = registry_par->bt_iso;
- }
- if (registry_par->bt_sco == 1) {
- /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter, 4.Busy,
- * 5.OtherBusy */
- pbtpriv->BT_Service = BT_OtherAction;
- } else if (registry_par->bt_sco == 2) {
- pbtpriv->BT_Service = BT_SCO;
- } else if (registry_par->bt_sco == 4) {
- pbtpriv->BT_Service = BT_Busy;
- } else if (registry_par->bt_sco == 5) {
- pbtpriv->BT_Service = BT_OtherBusy;
- } else {
- pbtpriv->BT_Service = BT_Idle;
- }
- pbtpriv->BT_Ampdu = registry_par->bt_ampdu;
- pbtpriv->bCOBT = _TRUE;
- pbtpriv->BtEdcaUL = 0;
- pbtpriv->BtEdcaDL = 0;
- pbtpriv->BtRssiState = 0xff;
- pbtpriv->bInitSet = _FALSE;
- pbtpriv->bBTBusyTraffic = _FALSE;
- pbtpriv->bBTTrafficModeSet = _FALSE;
- pbtpriv->bBTNonTrafficModeSet = _FALSE;
- pbtpriv->CurrentState = 0;
- pbtpriv->PreviousState = 0;
- pr_info("BT Coexistance = %s\n",
- (pbtpriv->BT_Coexist == _TRUE) ? "enable" : "disable");
- if (pbtpriv->BT_Coexist) {
- if (pbtpriv->BT_Ant_Num == Ant_x2)
- pr_info("BlueTooth BT_Ant_Num = Antx2\n");
- else if (pbtpriv->BT_Ant_Num == Ant_x1)
- pr_info("BlueTooth BT_Ant_Num = Antx1\n");
- switch (pbtpriv->BT_CoexistType) {
- case BT_2Wire:
- pr_info("BlueTooth BT_CoexistType = BT_2Wire\n");
- break;
- case BT_ISSC_3Wire:
- pr_info("BlueTooth BT_CoexistType = BT_ISSC_3Wire\n");
- break;
- case BT_Accel:
- pr_info("BlueTooth BT_CoexistType = BT_Accel\n");
- break;
- case BT_CSR_BC4:
- pr_info("BlueTooth BT_CoexistType = BT_CSR_BC4\n");
- break;
- case BT_CSR_BC8:
- pr_info("BlueTooth BT_CoexistType = BT_CSR_BC8\n");
- break;
- case BT_RTL8756:
- pr_info("BlueTooth BT_CoexistType = BT_RTL8756\n");
- break;
- default:
- pr_info("BlueTooth BT_CoexistType = Unknown\n");
- break;
- }
- pr_info("BlueTooth BT_Ant_isolation = %d\n",
- pbtpriv->BT_Ant_isolation);
- switch (pbtpriv->BT_Service) {
- case BT_OtherAction:
- pr_info("BlueTooth BT_Service = BT_OtherAction\n");
- break;
- case BT_SCO:
- pr_info("BlueTooth BT_Service = BT_SCO\n");
- break;
- case BT_Busy:
- pr_info("BlueTooth BT_Service = BT_Busy\n");
- break;
- case BT_OtherBusy:
- pr_info("BlueTooth BT_Service = BT_OtherBusy\n");
- break;
- default:
- pr_info("BlueTooth BT_Service = BT_Idle\n");
- break;
- }
- pr_info("BT_RadioSharedType = 0x%x\n",
- pbtpriv->BT_RadioSharedType);
- }
}
-#define GET_BT_COEXIST(priv) (&priv->bt_coexist)
-
-static void _rtl92cu_read_bluetooth_coexistInfo(struct ieee80211_hw *hw,
- u8 *contents,
- bool bautoloadfailed);
-{
- HAL_DATA_TYPE *pHalData = GET_HAL_DATA(Adapter);
- bool isNormal = IS_NORMAL_CHIP(pHalData->VersionID);
- struct btcoexist_priv *pbtpriv = &pHalData->bt_coexist;
- u8 rf_opt4;
-
- _rtw_memset(pbtpriv, 0, sizeof(struct btcoexist_priv));
- if (AutoloadFail) {
- pbtpriv->BT_Coexist = _FALSE;
- pbtpriv->BT_CoexistType = BT_2Wire;
- pbtpriv->BT_Ant_Num = Ant_x2;
- pbtpriv->BT_Ant_isolation = 0;
- pbtpriv->BT_RadioSharedType = BT_Radio_Shared;
- return;
- }
- if (isNormal) {
- if (pHalData->BoardType == BOARD_USB_COMBO)
- pbtpriv->BT_Coexist = _TRUE;
- else
- pbtpriv->BT_Coexist = ((PROMContent[EEPROM_RF_OPT3] &
- 0x20) >> 5); /* bit[5] */
- rf_opt4 = PROMContent[EEPROM_RF_OPT4];
- pbtpriv->BT_CoexistType = ((rf_opt4&0xe)>>1); /* bit [3:1] */
- pbtpriv->BT_Ant_Num = (rf_opt4&0x1); /* bit [0] */
- pbtpriv->BT_Ant_isolation = ((rf_opt4&0x10)>>4); /* bit [4] */
- pbtpriv->BT_RadioSharedType = ((rf_opt4&0x20)>>5); /* bit [5] */
- } else {
- pbtpriv->BT_Coexist = (PROMContent[EEPROM_RF_OPT4] >> 4) ?
- _TRUE : _FALSE;
- }
- _update_bt_param(Adapter);
-}
-#endif
-
static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -494,17 +355,17 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
HWSET_MAX_SIZE);
} else if (rtlefuse->epromtype == EEPROM_93C46) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("RTL819X Not boot from eeprom, check it !!"));
+ "RTL819X Not boot from eeprom, check it !!\n");
}
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, "MAP",
hwinfo, HWSET_MAX_SIZE);
eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0]));
if (eeprom_id != RTL8190_EEPROM_ID) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
+ "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
rtlefuse->autoload_failflag = true;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
}
if (rtlefuse->autoload_failflag)
@@ -518,16 +379,15 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->autoload_failflag, hwinfo);
rtlefuse->eeprom_vid = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VID]);
rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- (" VID = 0x%02x PID = 0x%02x\n",
- rtlefuse->eeprom_vid, rtlefuse->eeprom_did));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, " VID = 0x%02x PID = 0x%02x\n",
+ rtlefuse->eeprom_vid, rtlefuse->eeprom_did);
rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
rtlefuse->eeprom_version =
le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]);
rtlefuse->txpwr_fromeprom = true;
rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n",
+ rtlefuse->eeprom_oemid);
if (rtlhal->oem_id == RT_CID_DEFAULT) {
switch (rtlefuse->eeprom_oemid) {
case EEPROM_CID_DEFAULT:
@@ -554,10 +414,6 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
}
}
_rtl92cu_read_board_type(hw, hwinfo);
-#ifdef CONFIG_BT_COEXIST
- _rtl92cu_read_bluetooth_coexistInfo(hw, hwinfo,
- rtlefuse->autoload_failflag);
-#endif
}
static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
@@ -579,8 +435,8 @@ static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
default:
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("RT Customized ID: 0x%02X\n", rtlhal->oem_id));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RT Customized ID: 0x%02X\n",
+ rtlhal->oem_id);
}
void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw)
@@ -596,11 +452,11 @@ void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw)
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
rtlefuse->epromtype = (tmp_u1b & BOOT_FROM_EEPROM) ?
EEPROM_93C46 : EEPROM_BOOT_EFUSE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from %s\n",
- (tmp_u1b & BOOT_FROM_EEPROM) ? "EERROM" : "EFUSE"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from %s\n",
+ tmp_u1b & BOOT_FROM_EEPROM ? "EERROM" : "EFUSE");
rtlefuse->autoload_failflag = (tmp_u1b & EEPROM_EN) ? false : true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload %s\n",
- (tmp_u1b & EEPROM_EN) ? "OK!!" : "ERR!!"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload %s\n",
+ tmp_u1b & EEPROM_EN ? "OK!!" : "ERR!!");
_rtl92cu_read_adapter_info(hw);
_rtl92cu_hal_customized_behavior(hw);
return;
@@ -618,13 +474,12 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
do {
if (rtl_read_byte(rtlpriv, REG_APS_FSMCO) & PFM_ALDN) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("Autoload Done!\n"));
+ "Autoload Done!\n");
break;
}
if (pollingCount++ > 100) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- ("Failed to polling REG_APS_FSMCO[PFM_ALDN]"
- " done!\n"));
+ "Failed to polling REG_APS_FSMCO[PFM_ALDN] done!\n");
return -ENODEV;
}
} while (true);
@@ -639,8 +494,8 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
value8 |= LDV12_EN;
rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- (" power-on :REG_LDOV12D_CTRL Reg0x21:0x%02x.\n",
- value8));
+ " power-on :REG_LDOV12D_CTRL Reg0x21:0x%02x\n",
+ value8);
udelay(100);
value8 = rtl_read_byte(rtlpriv, REG_SYS_ISO_CTRL);
value8 &= ~ISO_MD2PP;
@@ -658,8 +513,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
}
if (pollingCount++ > 100) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- ("Failed to polling REG_APS_FSMCO[APFM_ONMAC]"
- " done!\n"));
+ "Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n");
return -ENODEV;
}
} while (true);
@@ -877,8 +731,8 @@ static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw,
hiQ = QUEUE_HIGH;
}
_rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- ("Tx queue select :0x%02x..\n", queue_sel));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Tx queue select :0x%02x..\n",
+ queue_sel);
}
static void _rtl92cu_init_chipN_queue_priority(struct ieee80211_hw *hw,
@@ -937,8 +791,8 @@ static void _rtl92cu_init_chipT_queue_priority(struct ieee80211_hw *hw,
break;
}
rtl_write_byte(rtlpriv, (REG_TRXDMA_CTRL+1), hq_sele);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- ("Tx queue select :0x%02x..\n", hq_sele));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Tx queue select :0x%02x..\n",
+ hq_sele);
}
static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw,
@@ -998,7 +852,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Failed to init power on!\n"));
+ "Failed to init power on!\n");
return err;
}
if (!wmm_enable) {
@@ -1010,7 +864,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
}
if (false == rtl92c_init_llt_table(hw, boundary)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Failed to init LLT Table!\n"));
+ "Failed to init LLT Table!\n");
return -EINVAL;
}
_rtl92cu_init_queue_reserved_page(hw, wmm_enable, out_ep_nums,
@@ -1043,12 +897,12 @@ void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm));
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("not open sw encryption\n"));
+ "not open sw encryption\n");
return;
}
sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;
@@ -1059,8 +913,8 @@ void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw)
if (IS_NORMAL_CHIP(rtlhal->version))
sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("The SECR-value %x\n", sec_reg_value));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "The SECR-value %x\n",
+ sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
@@ -1111,34 +965,6 @@ static void _InitPABias(struct ieee80211_hw *hw)
}
}
-static void _InitAntenna_Selection(struct ieee80211_hw *hw)
-{
-#ifdef CONFIG_ANTENNA_DIVERSITY
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- if (pHalData->AntDivCfg == 0)
- return;
-
- if (rtlphy->rf_type == RF_1T1R) {
- rtl_write_dword(rtlpriv, REG_LEDCFG0,
- rtl_read_dword(rtlpriv,
- REG_LEDCFG0)|BIT(23));
- rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
- if (rtl_get_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300) ==
- Antenna_A)
- pHalData->CurAntenna = Antenna_A;
- else
- pHalData->CurAntenna = Antenna_B;
- }
-#endif
-}
-
-static void _dump_registers(struct ieee80211_hw *hw)
-{
-}
-
static void _update_mac_setting(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1163,18 +989,15 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
err = _rtl92cu_init_mac(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("init mac failed!\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n");
return err;
}
err = rtl92c_download_fw(hw);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("Failed to download FW. Init HW without FW now..\n"));
+ "Failed to download FW. Init HW without FW now..\n");
err = 1;
- rtlhal->fw_ready = false;
return err;
- } else {
- rtlhal->fw_ready = true;
}
rtlhal->last_hmeboxnum = 0; /* h2c */
_rtl92cu_phy_param_tab_init(hw);
@@ -1209,10 +1032,8 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
}
_rtl92cu_hw_configure(hw);
_InitPABias(hw);
- _InitAntenna_Selection(hw);
_update_mac_setting(hw);
rtl92c_dm_init(hw);
- _dump_registers(hw);
return err;
}
@@ -1270,24 +1091,21 @@ static void _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM)
if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(1)) {
/* reset MCU ready status */
rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
- if (rtlhal->fw_ready) {
- /* 8051 reset by self */
- rtl_write_byte(rtlpriv, REG_HMETFR+3, 0x20);
- while ((retry_cnts++ < 100) &&
- (FEN_CPUEN & rtl_read_word(rtlpriv,
- REG_SYS_FUNC_EN))) {
- udelay(50);
- }
- if (retry_cnts >= 100) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("#####=> 8051 reset failed!.."
- ".......................\n"););
- /* if 8051 reset fail, reset MAC. */
- rtl_write_byte(rtlpriv,
- REG_SYS_FUNC_EN + 1,
- 0x50);
- udelay(100);
- }
+ /* 8051 reset by self */
+ rtl_write_byte(rtlpriv, REG_HMETFR+3, 0x20);
+ while ((retry_cnts++ < 100) &&
+ (FEN_CPUEN & rtl_read_word(rtlpriv,
+ REG_SYS_FUNC_EN))) {
+ udelay(50);
+ }
+ if (retry_cnts >= 100) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "#####=> 8051 reset failed!.........................\n");
+ /* if 8051 reset fail, reset MAC. */
+ rtl_write_byte(rtlpriv,
+ REG_SYS_FUNC_EN + 1,
+ 0x50);
+ udelay(100);
}
}
/* Reset MAC and Enable 8051 */
@@ -1495,35 +1313,36 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
_rtl92cu_resume_tx_beacon(hw);
_rtl92cu_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("Set HW_VAR_MEDIA_"
- "STATUS:No such media status(%x).\n", type));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS:No such media status(%x)\n",
+ type);
}
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
bt_msr |= MSR_NOLINK;
ledaction = LED_CTL_LINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Set Network type to NO LINK!\n"));
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
bt_msr |= MSR_ADHOC;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Set Network type to Ad Hoc!\n"));
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
bt_msr |= MSR_INFRA;
ledaction = LED_CTL_LINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Set Network type to STA!\n"));
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
bt_msr |= MSR_AP;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Set Network type to AP!\n"));
+ "Set Network type to AP!\n");
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Network type %d not support!\n", type));
+ "Network type %d not supported!\n", type);
goto error_out;
}
rtl_write_byte(rtlpriv, (MSR), bt_msr);
@@ -1684,8 +1503,8 @@ void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw)
value32 |= TSFRST;
rtl_write_dword(rtlpriv, REG_TCR, value32);
RT_TRACE(rtlpriv, COMP_INIT|COMP_BEACON, DBG_LOUD,
- ("SetBeaconRelatedRegisters8192CUsb(): Set TCR(%x)\n",
- value32));
+ "SetBeaconRelatedRegisters8192CUsb(): Set TCR(%x)\n",
+ value32);
/* TODO: Modify later (Find the right parameters)
* NOTE: Fix test chip's bug (about contention windows's randomness) */
if ((mac->opmode == NL80211_IFTYPE_ADHOC) ||
@@ -1702,8 +1521,8 @@ void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- ("beacon_interval:%d\n", bcn_interval));
+ RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG, "beacon_interval:%d\n",
+ bcn_interval);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
}
@@ -1767,7 +1586,7 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
}
@@ -1827,8 +1646,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
rtl_write_byte(rtlpriv, REG_R2T_SIFS+1, val[0]);
rtl_write_byte(rtlpriv, REG_T2T_SIFS+1, val[0]);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("HW_VAR_SIFS\n"));
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, "HW_VAR_SIFS\n");
break;
}
case HW_VAR_SLOT_TIME:{
@@ -1837,7 +1655,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("HW_VAR_SLOT_TIME %x\n", val[0]));
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
if (QOS_MODE) {
for (e_aci = 0; e_aci < AC_MAX; e_aci++)
rtlpriv->cfg->ops->set_hw_reg(hw,
@@ -1901,8 +1719,8 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
min_spacing_to_set);
*val = min_spacing_to_set;
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg));
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
}
@@ -1916,8 +1734,8 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
mac->min_space_cfg &= 0x07;
mac->min_space_cfg |= (density_to_set << 3);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg));
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
break;
@@ -1950,8 +1768,8 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
p_regtoset[index]);
}
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset));
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
break;
}
@@ -1969,8 +1787,8 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
AC_PARAM_ECW_MAX_OFFSET);
u4b_ac_param |= (u32) tx_op << AC_PARAM_TXOP_OFFSET;
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("queue:%x, ac_param:%x\n", e_aci,
- u4b_ac_param));
+ "queue:%x, ac_param:%x\n",
+ e_aci, u4b_ac_param);
switch (e_aci) {
case AC1_BK:
rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM,
@@ -1989,8 +1807,9 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u4b_ac_param);
break;
default:
- RT_ASSERT(false, ("SetHwReg8185(): invalid"
- " aci: %d !\n", e_aci));
+ RT_ASSERT(false,
+ "SetHwReg8185(): invalid aci: %d !\n",
+ e_aci);
break;
}
if (rtlusb->acm_method != eAcmWay2_SW)
@@ -2020,8 +1839,8 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("HW_VAR_ACM_CTRL acm set "
- "failed: eACI is %d\n", acm));
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -2037,13 +1856,13 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
}
RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- ("SetHwReg8190pci(): [HW_VAR_ACM_CTRL] "
- "Write 0x%X\n", acm_ctrl));
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
break;
}
@@ -2051,7 +1870,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
mac->rx_conf = ((u32 *) (val))[0];
RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
- ("### Set RCR(0x%08x) ###\n", mac->rx_conf));
+ "### Set RCR(0x%08x) ###\n", mac->rx_conf);
break;
}
case HW_VAR_RETRY_LIMIT:{
@@ -2060,8 +1879,9 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
retry_limit << RETRY_LIMIT_LONG_SHIFT);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_DMESG, ("Set HW_VAR_R"
- "ETRY_LIMIT(0x%08x)\n", retry_limit));
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_DMESG,
+ "Set HW_VAR_RETRY_LIMIT(0x%08x)\n",
+ retry_limit);
break;
}
case HW_VAR_DUAL_TSF_RST:
@@ -2165,8 +1985,8 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case "
- "not process\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not processed\n");
break;
}
}
@@ -2239,8 +2059,8 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
(shortgi_rate << 4) | (shortgi_rate);
}
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("%x\n", rtl_read_dword(rtlpriv,
- REG_ARFR0)));
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
+ rtl_read_dword(rtlpriv, REG_ARFR0));
}
void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
@@ -2344,17 +2164,16 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
ratr_bitmap &= 0x0f0ff0ff;
break;
}
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("ratr_bitmap :%x\n",
- ratr_bitmap));
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "ratr_bitmap :%x\n",
+ ratr_bitmap);
*(u32 *)&rate_mask = ((ratr_bitmap & 0x0fffffff) |
ratr_index << 28);
rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
- "ratr_val:%x, %x:%x:%x:%x:%x\n",
- ratr_index, ratr_bitmap,
- rate_mask[0], rate_mask[1],
- rate_mask[2], rate_mask[3],
- rate_mask[4]));
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1], rate_mask[2], rate_mask[3],
+ rate_mask[4]);
rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
}
@@ -2404,7 +2223,7 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
e_rfpowerstate_toset = (u1tmp & BIT(7)) ?
ERFOFF : ERFON;
RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- ("pwrdown, 0x5c(BIT7)=%02x\n", u1tmp));
+ "pwrdown, 0x5c(BIT7)=%02x\n", u1tmp);
} else {
rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG,
rtl_read_byte(rtlpriv,
@@ -2413,27 +2232,26 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
e_rfpowerstate_toset = (u1tmp & BIT(3)) ?
ERFON : ERFOFF;
RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- ("GPIO_IN=%02x\n", u1tmp));
+ "GPIO_IN=%02x\n", u1tmp);
}
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("N-SS RF =%x\n",
- e_rfpowerstate_toset));
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "N-SS RF =%x\n",
+ e_rfpowerstate_toset);
}
if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("GPIOChangeRF - HW "
- "Radio ON, RF ON\n"));
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
ppsc->hwradiooff = false;
actuallyset = true;
} else if ((!ppsc->hwradiooff) && (e_rfpowerstate_toset ==
ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("GPIOChangeRF - HW"
- " Radio OFF\n"));
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ "GPIOChangeRF - HW Radio OFF\n");
ppsc->hwradiooff = true;
actuallyset = true;
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
- ("pHalData->bHwRadioOff and eRfPowerStateToSet do not"
- " match: pHalData->bHwRadioOff %x, eRfPowerStateToSet "
- "%x\n", ppsc->hwradiooff, e_rfpowerstate_toset));
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ "pHalData->bHwRadioOff and eRfPowerStateToSet do not match: pHalData->bHwRadioOff %x, eRfPowerStateToSet %x\n",
+ ppsc->hwradiooff, e_rfpowerstate_toset);
}
if (actuallyset) {
ppsc->hwradiooff = true;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
index 32f85cba106a..f41a3aa4a26f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/rtlwifi/rtl8192cu/led.c
index 2ff9d8314e7b..75a2deb23af1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/led.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -47,8 +47,8 @@ void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -62,7 +62,7 @@ void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
pled->ledon = true;
@@ -74,8 +74,8 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -95,7 +95,7 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
pled->ledon = false;
@@ -136,7 +136,6 @@ void rtl92cu_led_control(struct ieee80211_hw *hw,
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d,\n",
- ledaction));
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n", ledaction);
_rtl92cu_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.h b/drivers/net/wireless/rtlwifi/rtl8192cu/led.h
index decaee4d1eb1..0f372278b7af 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/led.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 9e0c8fcdf90f..025bdc2eba44 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,10 +27,6 @@
*
****************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-
#include "../wifi.h"
#include "../pci.h"
#include "../usb.h"
@@ -44,6 +40,8 @@
#include "mac.h"
#include "trx.h"
+#include <linux/module.h>
+
/* macro to shorten lines */
#define LINK_Q ui_link_quality
@@ -57,6 +55,7 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
enum version_8192c chip_version = VERSION_UNKNOWN;
+ const char *versionid;
u32 value32;
value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
@@ -84,88 +83,69 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
}
}
rtlhal->version = (enum version_8192c)chip_version;
- pr_info("rtl8192cu: Chip version 0x%x\n", chip_version);
+ pr_info("Chip version 0x%x\n", chip_version);
switch (rtlhal->version) {
case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_B_CHIP_92C.\n"));
+ versionid = "NORMAL_B_CHIP_92C";
break;
case VERSION_NORMAL_TSMC_CHIP_92C:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_92C.\n"));
+ versionid = "NORMAL_TSMC_CHIP_92C";
break;
case VERSION_NORMAL_TSMC_CHIP_88C:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_88C.\n"));
+ versionid = "NORMAL_TSMC_CHIP_88C";
break;
case VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_i"
- "92C_1T2R_A_CUT.\n"));
+ versionid = "NORMAL_UMC_CHIP_i92C_1T2R_A_CUT";
break;
case VERSION_NORMAL_UMC_CHIP_92C_A_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_"
- "92C_A_CUT.\n"));
+ versionid = "NORMAL_UMC_CHIP_92C_A_CUT";
break;
case VERSION_NORMAL_UMC_CHIP_88C_A_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
- "_88C_A_CUT.\n"));
+ versionid = "NORMAL_UMC_CHIP_88C_A_CUT";
break;
case VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
- "_92C_1T2R_B_CUT.\n"));
+ versionid = "NORMAL_UMC_CHIP_92C_1T2R_B_CUT";
break;
case VERSION_NORMAL_UMC_CHIP_92C_B_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
- "_92C_B_CUT.\n"));
+ versionid = "NORMAL_UMC_CHIP_92C_B_CUT";
break;
case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
- "_88C_B_CUT.\n"));
+ versionid = "NORMAL_UMC_CHIP_88C_B_CUT";
break;
case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_NORMA_UMC_CHIP"
- "_8723_1T1R_A_CUT.\n"));
+ versionid = "NORMAL_UMC_CHIP_8723_1T1R_A_CUT";
break;
case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_NORMA_UMC_CHIP"
- "_8723_1T1R_B_CUT.\n"));
+ versionid = "NORMAL_UMC_CHIP_8723_1T1R_B_CUT";
break;
case VERSION_TEST_CHIP_92C:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_TEST_CHIP_92C.\n"));
+ versionid = "TEST_CHIP_92C";
break;
case VERSION_TEST_CHIP_88C:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: VERSION_TEST_CHIP_88C.\n"));
+ versionid = "TEST_CHIP_88C";
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Chip Version ID: ???????????????.\n"));
+ versionid = "UNKNOWN";
break;
}
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Chip Version ID: %s\n", versionid);
+
if (IS_92C_SERIAL(rtlhal->version))
rtlphy->rf_type =
(IS_92C_1T2R(rtlhal->version)) ? RF_1T2R : RF_2T2R;
else
rtlphy->rf_type = RF_1T1R;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
- "RF_2T2R" : "RF_1T1R"));
+ "Chip RF Type: %s\n",
+ rtlphy->rf_type == RF_2T2R ? "RF_2T2R" : "RF_1T1R");
if (get_rf_type(rtlphy) == RF_1T1R)
rtlpriv->dm.rfpath_rxenable[0] = true;
else
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
- rtlhal->version));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
}
/**
@@ -192,9 +172,8 @@ bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
break;
if (count > POLLING_LLT_THRESHOLD) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Failed to polling write LLT done at"
- " address %d! _LLT_OP_VALUE(%x)\n",
- address, _LLT_OP_VALUE(value)));
+ "Failed to polling write LLT done at address %d! _LLT_OP_VALUE(%x)\n",
+ address, _LLT_OP_VALUE(value));
status = false;
break;
}
@@ -272,7 +251,7 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("clear_all\n"));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
rtl_cam_empty_entry(hw, cam_offset + idx);
@@ -298,7 +277,7 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("iillegal switch case\n"));
+ "illegal switch case\n");
enc_algo = CAM_TKIP;
break;
}
@@ -317,26 +296,26 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
}
if (rtlpriv->sec.key_len[key_index] == 0) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("delete one entry\n"));
+ "delete one entry\n");
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("The insert KEY length is %d\n",
- rtlpriv->sec.key_len[PAIRWISE_KEYIDX]));
+ "The insert KEY length is %d\n",
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("The insert KEY is %x %x\n",
- rtlpriv->sec.key_buf[0][0],
- rtlpriv->sec.key_buf[0][1]));
+ "The insert KEY is %x %x\n",
+ rtlpriv->sec.key_buf[0][0],
+ rtlpriv->sec.key_buf[0][1]);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("add one entry\n"));
+ "add one entry\n");
if (is_pairwise) {
RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
- "Pairwiase Key content :",
+ "Pairwise Key content",
rtlpriv->sec.pairwise_key,
rtlpriv->sec.
key_len[PAIRWISE_KEYIDX]);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("set Pairwiase key\n"));
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
@@ -345,7 +324,7 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
key_buf[key_index]);
} else {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("set group key\n"));
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
rtlefuse->dev_addr,
@@ -421,8 +400,8 @@ void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
AC_PARAM_ECW_MAX_OFFSET;
u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) <<
AC_PARAM_TXOP_OFFSET;
- RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD,
- ("queue:%x, ac_param:%x\n", aci, u4b_ac_param));
+ RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD, "queue:%x, ac_param:%x\n",
+ aci, u4b_ac_param);
switch (aci) {
case AC1_BK:
rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
@@ -437,7 +416,7 @@ void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param);
break;
default:
- RT_ASSERT(false, ("invalid aci: %d !\n", aci));
+ RT_ASSERT(false, "invalid aci: %d !\n", aci);
break;
}
}
@@ -453,14 +432,14 @@ void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr)
for (i = 0 ; i < ETH_ALEN ; i++)
rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i));
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, ("MAC Address: %02X-%02X-%02X-"
- "%02X-%02X-%02X\n",
- rtl_read_byte(rtlpriv, REG_MACID),
- rtl_read_byte(rtlpriv, REG_MACID+1),
- rtl_read_byte(rtlpriv, REG_MACID+2),
- rtl_read_byte(rtlpriv, REG_MACID+3),
- rtl_read_byte(rtlpriv, REG_MACID+4),
- rtl_read_byte(rtlpriv, REG_MACID+5)));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
+ "MAC Address: %02X-%02X-%02X-%02X-%02X-%02X\n",
+ rtl_read_byte(rtlpriv, REG_MACID),
+ rtl_read_byte(rtlpriv, REG_MACID+1),
+ rtl_read_byte(rtlpriv, REG_MACID+2),
+ rtl_read_byte(rtlpriv, REG_MACID+3),
+ rtl_read_byte(rtlpriv, REG_MACID+4),
+ rtl_read_byte(rtlpriv, REG_MACID+5));
}
void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
@@ -478,26 +457,26 @@ int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
case NL80211_IFTYPE_UNSPECIFIED:
value = NT_NO_LINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("Set Network type to NO LINK!\n"));
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
value = NT_LINK_AD_HOC;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("Set Network type to Ad Hoc!\n"));
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
value = NT_LINK_AP;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("Set Network type to STA!\n"));
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
value = NT_AS_AP;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("Set Network type to AP!\n"));
+ "Set Network type to AP!\n");
break;
default:
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("Network type %d not support!\n", type));
+ "Network type %d not supported!\n", type);
return -EOPNOTSUPP;
}
rtl_write_byte(rtlpriv, (REG_CR + 2), value);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
index 626d88e88e26..bf53652e4edd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index e49cf2244c75..34e56308301e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -44,9 +44,9 @@ u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
u32 original_value, readback_value, bitshift;
struct rtl_phy *rtlphy = &(rtlpriv->phy);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
- "rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
if (rtlphy->rf_mode != RF_OP_BY_FW) {
original_value = _rtl92c_phy_rf_serial_read(hw,
rfpath, regaddr);
@@ -57,9 +57,8 @@ u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- ("regaddr(%#x), rfpath(%#x), "
- "bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value));
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -72,8 +71,8 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
u32 original_value, bitshift;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath));
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
if (rtlphy->rf_mode != RF_OP_BY_FW) {
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl92c_phy_rf_serial_read(hw,
@@ -97,9 +96,9 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
}
_rtl92c_phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
- "bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
}
bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw)
@@ -152,11 +151,10 @@ bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Read Rtl819XMACPHY_Array\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n");
arraylength = rtlphy->hwparam_tables[MAC_REG].length ;
ptrarray = rtlphy->hwparam_tables[MAC_REG].pdata;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Img:RTL8192CEMAC_2T_ARRAY\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Img:RTL8192CEMAC_2T_ARRAY\n");
for (i = 0; i < arraylength; i = i + 2)
rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
return true;
@@ -202,10 +200,9 @@ bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
phy_regarray_table[i + 1]);
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("The phy_regarray_table[0] is %x"
- " Rtl819XPHY_REGArray[1] is %x\n",
- phy_regarray_table[i],
- phy_regarray_table[i + 1]));
+ "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]);
}
} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
for (i = 0; i < agctab_arraylen; i = i + 2) {
@@ -213,10 +210,9 @@ bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
agctab_array_table[i + 1]);
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("The agctab_array_table[0] is "
- "%x Rtl819XPHY_REGArray[1] is %x\n",
- agctab_array_table[i],
- agctab_array_table[i + 1]));
+ "The agctab_array_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]);
}
}
return true;
@@ -255,7 +251,7 @@ bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
}
} else {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- ("configtype != BaseBand_Config_PHY_REG\n"));
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -277,20 +273,20 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
radiob_arraylen = rtlphy->hwparam_tables[RADIOB_2T].length;
radiob_array_table = rtlphy->hwparam_tables[RADIOB_2T].pdata;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Radio_A:RTL8192CERADIOA_2TARRAY\n"));
+ "Radio_A:RTL8192CERADIOA_2TARRAY\n");
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Radio_B:RTL8192CE_RADIOB_2TARRAY\n"));
+ "Radio_B:RTL8192CE_RADIOB_2TARRAY\n");
} else {
radioa_arraylen = rtlphy->hwparam_tables[RADIOA_1T].length;
radioa_array_table = rtlphy->hwparam_tables[RADIOA_1T].pdata;
radiob_arraylen = rtlphy->hwparam_tables[RADIOB_1T].length;
radiob_array_table = rtlphy->hwparam_tables[RADIOB_1T].pdata;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Radio_A:RTL8192CE_RADIOA_1TARRAY\n"));
+ "Radio_A:RTL8192CE_RADIOA_1TARRAY\n");
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Radio_B:RTL8192CE_RADIOB_1TARRAY\n"));
+ "Radio_B:RTL8192CE_RADIOB_1TARRAY\n");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Radio No %x\n", rfpath));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
@@ -338,11 +334,11 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
break;
case RF90_PATH_C:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
return true;
@@ -357,10 +353,9 @@ void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- ("Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz"))
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (is_hal_stop(rtlhal)) {
rtlphy->set_bwmode_inprogress = false;
return;
@@ -381,7 +376,7 @@ void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
break;
}
switch (rtlphy->current_chan_bw) {
@@ -403,12 +398,12 @@ void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
break;
}
rtl92cu_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
}
void rtl92cu_bb_block_on(struct ieee80211_hw *hw)
@@ -480,18 +475,16 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
do {
InitializeCount++;
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("IPS Set eRf nic enable\n"));
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
- } while ((rtstatus != true)
- && (InitializeCount < 10));
+ } while (!rtstatus && (InitializeCount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("Set ERFON sleeped:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->
- last_sleep_jiffies)));
+ "Set ERFON sleeped:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
ppsc->last_awake_jiffies = jiffies;
rtl92ce_phy_set_rf_on(hw);
}
@@ -513,27 +506,25 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
continue;
} else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("eRf Off/Sleep: %d times "
- "TcbBusyQueue[%d] "
- "=%d before doze!\n", (i + 1),
- queue_id,
- skb_queue_len(&ring->queue)));
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ i + 1,
+ queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("\nERFOFF: %d times "
- "TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue)));
+ "ERFOFF: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("IPS Set eRf nic disable\n"));
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -557,33 +548,30 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
continue;
} else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("eRf Off/Sleep: %d times "
- "TcbBusyQueue[%d] =%d before "
- "doze!\n", (i + 1), queue_id,
- skb_queue_len(&ring->queue)));
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ i + 1, queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("\n ERFSLEEP: %d times "
- "TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue)));
+ "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("Set ERFSLEEP awaked:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies)));
+ "Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies - ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
_rtl92c_phy_set_rf_sleep(hw);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
bresult = false;
break;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
index ff81a61729d7..42b068660483 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h b/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h
index 7f1be614c998..8b81465c629b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index 1e851aae58db..506b9a078ed1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -56,7 +56,7 @@ void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("unknown bandwidth: %#X\n", bandwidth));
+ "unknown bandwidth: %#X\n", bandwidth);
break;
}
}
@@ -140,26 +140,26 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
- RTXAGC_A_CCK1_MCS32));
+ "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_A_CCK1_MCS32);
tmpval = tx_agc[RF90_PATH_A] >> 8;
if (mac->mode == WIRELESS_MODE_B)
tmpval = tmpval & 0xff00ffff;
rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
- RTXAGC_B_CCK11_A_CCK2_11));
+ "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_B_CCK11_A_CCK2_11);
tmpval = tx_agc[RF90_PATH_B] >> 24;
rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
- RTXAGC_B_CCK11_A_CCK2_11));
+ "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_B_CCK11_A_CCK2_11);
tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
- RTXAGC_B_CCK1_55_MCS32));
+ "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_B_CCK1_55_MCS32);
}
static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
@@ -181,8 +181,8 @@ static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
(powerBase0 << 8) | powerBase0;
*(ofdmbase + i) = powerBase0;
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- (" [OFDM power base index rf(%c) = 0x%x]\n",
- ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)));
+ " [OFDM power base index rf(%c) = 0x%x]\n",
+ i == 0 ? 'A' : 'B', *(ofdmbase + i));
}
for (i = 0; i < 2; i++) {
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
@@ -194,8 +194,8 @@ static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
(powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
*(mcsbase + i) = powerBase1;
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- (" [MCS power base index rf(%c) = 0x%x]\n",
- ((i == 0) ? 'A' : 'B'), *(mcsbase + i)));
+ " [MCS power base index rf(%c) = 0x%x]\n",
+ i == 0 ? 'A' : 'B', *(mcsbase + i));
}
}
@@ -219,8 +219,8 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
[chnlgroup][index + (rf ? 8 : 0)]
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("RTK better performance,writeVal(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), writeVal));
+ "RTK better performance,writeVal(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeVal);
break;
case 1:
if (rtlphy->pwrgroup_cnt == 1)
@@ -244,32 +244,31 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
((index < 2) ? powerBase0[rf] :
powerBase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("Realtek regulatory, 20MHz, "
- "writeVal(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), writeVal));
+ "Realtek regulatory, 20MHz, writeVal(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeVal);
break;
case 2:
writeVal = ((index < 2) ? powerBase0[rf] :
powerBase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("Better regulatory,writeVal(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), writeVal));
+ "Better regulatory,writeVal(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeVal);
break;
case 3:
chnlgroup = 0;
if (rtlphy->current_chan_bw ==
HT_CHANNEL_WIDTH_20_40) {
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("customer's limit, 40MHzrf(%c) = "
- "0x%x\n", ((rf == 0) ? 'A' : 'B'),
+ "customer's limit, 40MHzrf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B',
rtlefuse->pwrgroup_ht40[rf]
- [channel - 1]));
+ [channel - 1]);
} else {
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("customer's limit, 20MHz rf(%c) = "
- "0x%x\n", ((rf == 0) ? 'A' : 'B'),
+ "customer's limit, 20MHz rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B',
rtlefuse->pwrgroup_ht20[rf]
- [channel - 1]));
+ [channel - 1]);
}
for (i = 0; i < 4; i++) {
pwr_diff_limit[i] =
@@ -297,22 +296,22 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
(pwr_diff_limit[2] << 16) |
(pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("Customer's limit rf(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), customer_limit));
+ "Customer's limit rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', customer_limit);
writeVal = customer_limit + ((index < 2) ?
powerBase0[rf] : powerBase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("Customer, writeVal rf(%c)= 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), writeVal));
+ "Customer, writeVal rf(%c)= 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeVal);
break;
default:
chnlgroup = 0;
writeVal = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
[index + (rf ? 8 : 0)] + ((index < 2) ?
powerBase0[rf] : powerBase1[rf]);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR, ("RTK better "
- "performance, writeValrf(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), writeVal));
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "RTK better performance, writeValrf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeVal);
break;
}
if (rtlpriv->dm.dynamic_txhighpower_lvl ==
@@ -365,7 +364,7 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
regoffset = regoffset_b[index];
rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("Set 0x%x = %08x\n", regoffset, writeVal));
+ "Set 0x%x = %08x\n", regoffset, writeVal);
if (((get_rf_type(rtlphy) == RF_2T2R) &&
(regoffset == RTXAGC_A_MCS15_MCS12 ||
regoffset == RTXAGC_B_MCS15_MCS12)) ||
@@ -480,13 +479,13 @@ static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
BRFSI_RFENV << 16, u4_regvalue);
break;
}
- if (rtstatus != true) {
+ if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Radio[%d] Fail!!", rfpath));
+ "Radio[%d] Fail!!", rfpath);
goto phy_rf_cfg_fail;
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("<---\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
return rtstatus;
phy_rf_cfg_fail:
return rtstatus;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
index 500a2094b6bb..090fd33a158d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 6d2ca773bbc7..82c85286ab2e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -41,7 +41,6 @@
#include "trx.h"
#include "led.h"
#include "hw.h"
-#include <linux/vmalloc.h>
#include <linux/module.h>
MODULE_AUTHOR("Georgia <georgia@realtek.com>");
@@ -54,7 +53,6 @@ MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- const struct firmware *firmware;
int err;
rtlpriv->dm.dm_initialgain_enable = true;
@@ -62,29 +60,21 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
- rtlpriv->rtlhal.pfirmware = vmalloc(0x4000);
+
+ /* for firmware buf */
+ rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
if (!rtlpriv->rtlhal.pfirmware) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Can't alloc buffer for fw.\n"));
+ "Can't alloc buffer for fw\n");
return 1;
}
- /* request fw */
- err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
- rtlpriv->io.dev);
- if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Failed to request firmware!\n"));
- return 1;
- }
- if (firmware->size > 0x4000) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Firmware is too big!\n"));
- release_firmware(firmware);
- return 1;
- }
- memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
- rtlpriv->rtlhal.fwsize = firmware->size;
- release_firmware(firmware);
+
+ pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name);
+ rtlpriv->max_fw_size = 0x4000;
+ err = request_firmware_nowait(THIS_MODULE, 1,
+ rtlpriv->cfg->fw_name, rtlpriv->io.dev,
+ GFP_KERNEL, hw, rtl_fw_cb);
+
return 0;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
index 43b1177924ab..a1310abd0d54 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
index d57ef5e88a9e..966be519edb8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.h b/drivers/net/wireless/rtlwifi/rtl8192cu/table.h
index c3d5cd826cfa..4b020e9e30b1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/table.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/table.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index b3cc7b949992..21bc827c5fa6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -108,7 +108,7 @@ static void _TwoOutEpMapping(struct ieee80211_hw *hw, bool bIsChipB,
if (bwificfg) { /* for WMM */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("USB Chip-B & WMM Setting.....\n"));
+ "USB Chip-B & WMM Setting.....\n");
ep_map->ep_mapping[RTL_TXQ_BE] = 2;
ep_map->ep_mapping[RTL_TXQ_BK] = 3;
ep_map->ep_mapping[RTL_TXQ_VI] = 3;
@@ -118,7 +118,7 @@ static void _TwoOutEpMapping(struct ieee80211_hw *hw, bool bIsChipB,
ep_map->ep_mapping[RTL_TXQ_HI] = 2;
} else { /* typical setting */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("USB typical Setting.....\n"));
+ "USB typical Setting.....\n");
ep_map->ep_mapping[RTL_TXQ_BE] = 3;
ep_map->ep_mapping[RTL_TXQ_BK] = 3;
ep_map->ep_mapping[RTL_TXQ_VI] = 2;
@@ -135,7 +135,7 @@ static void _ThreeOutEpMapping(struct ieee80211_hw *hw, bool bwificfg,
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (bwificfg) { /* for WMM */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("USB 3EP Setting for WMM.....\n"));
+ "USB 3EP Setting for WMM.....\n");
ep_map->ep_mapping[RTL_TXQ_BE] = 5;
ep_map->ep_mapping[RTL_TXQ_BK] = 3;
ep_map->ep_mapping[RTL_TXQ_VI] = 3;
@@ -145,7 +145,7 @@ static void _ThreeOutEpMapping(struct ieee80211_hw *hw, bool bwificfg,
ep_map->ep_mapping[RTL_TXQ_HI] = 2;
} else { /* typical setting */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("USB 3EP Setting for typical.....\n"));
+ "USB 3EP Setting for typical.....\n");
ep_map->ep_mapping[RTL_TXQ_BE] = 5;
ep_map->ep_mapping[RTL_TXQ_BK] = 5;
ep_map->ep_mapping[RTL_TXQ_VI] = 3;
@@ -244,8 +244,8 @@ u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index)
break;
default:
hw_queue_index = RTL_TXQ_BE;
- RT_ASSERT(false, ("QSLT_BE queue, skb_queue:%d\n",
- mac80211_queue_index));
+ RT_ASSERT(false, "QSLT_BE queue, skb_queue:%d\n",
+ mac80211_queue_index);
break;
}
out:
@@ -270,23 +270,23 @@ static enum rtl_desc_qsel _rtl8192cu_mq_to_descq(struct ieee80211_hw *hw,
case 0: /* VO */
qsel = QSLT_VO;
RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
- ("VO queue, set qsel = 0x%x\n", QSLT_VO));
+ "VO queue, set qsel = 0x%x\n", QSLT_VO);
break;
case 1: /* VI */
qsel = QSLT_VI;
RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
- ("VI queue, set qsel = 0x%x\n", QSLT_VI));
+ "VI queue, set qsel = 0x%x\n", QSLT_VI);
break;
case 3: /* BK */
qsel = QSLT_BK;
RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
- ("BK queue, set qsel = 0x%x\n", QSLT_BK));
+ "BK queue, set qsel = 0x%x\n", QSLT_BK);
break;
case 2: /* BE */
default:
qsel = QSLT_BE;
RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
- ("BE queue, set qsel = 0x%x\n", QSLT_BE));
+ "BE queue, set qsel = 0x%x\n", QSLT_BE);
break;
}
out:
@@ -422,17 +422,17 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
bv = ieee80211_is_probe_resp(fc);
if (bv)
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("Got probe response frame.\n"));
+ "Got probe response frame\n");
if (ieee80211_is_beacon(fc))
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("Got beacon frame.\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Got beacon frame\n");
if (ieee80211_is_data(fc))
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Got data frame.\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Got data frame\n");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("Fram: fc = 0x%X addr1 = 0x%02X:0x%02X:0x%02X:0x%02X:0x%02X:"
- "0x%02X\n", fc, (u32)hdr->addr1[0], (u32)hdr->addr1[1],
- (u32)hdr->addr1[2], (u32)hdr->addr1[3], (u32)hdr->addr1[4],
- (u32)hdr->addr1[5]));
+ "Fram: fc = 0x%X addr1 = 0x%02X:0x%02X:0x%02X:0x%02X:0x%02X:0x%02X\n",
+ fc,
+ (u32)hdr->addr1[0], (u32)hdr->addr1[1],
+ (u32)hdr->addr1[2], (u32)hdr->addr1[3],
+ (u32)hdr->addr1[4], (u32)hdr->addr1[5]);
memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));
ieee80211_rx_irqsafe(hw, skb);
}
@@ -594,7 +594,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- ("Enable RDG function.\n"));
+ "Enable RDG function\n");
SET_TX_DESC_RDG_ENABLE(txdesc, 1);
SET_TX_DESC_HTC(txdesc, 1);
}
@@ -620,7 +620,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_BMC(txdesc, 1);
_rtl_fill_usb_tx_desc(txdesc);
_rtl_tx_desc_checksum(txdesc);
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, (" %s ==>\n", __func__));
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "==>\n");
}
void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
@@ -677,7 +677,7 @@ void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
SET_TX_DESC_HWSEQ_EN(pdesc, 1);
SET_TX_DESC_PKT_ID(pdesc, 8);
}
- RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n",
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content",
pdesc, RTL_TX_DESC_SIZE);
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
index 53de5f66e242..332b06e78b00 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/rtlwifi/rtl8192de/def.h
index 946304771748..eafdf76ed64d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/def.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index 3cd0736fe8e1..4737018c9daa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -246,23 +246,21 @@ static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("Cnt_Fast_Fsync_fail = %x, "
- "Cnt_SB_Search_fail = %x\n",
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Cnt_Fast_Fsync_fail = %x, Cnt_SB_Search_fail = %x\n",
falsealm_cnt->cnt_fast_fsync_fail,
- falsealm_cnt->cnt_sb_search_fail));
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("Cnt_Parity_Fail = %x, "
- "Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, "
- "Cnt_Mcs_fail = %x\n",
+ falsealm_cnt->cnt_sb_search_fail);
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Cnt_Parity_Fail = %x, Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, Cnt_Mcs_fail = %x\n",
falsealm_cnt->cnt_parity_fail,
falsealm_cnt->cnt_rate_illegal,
falsealm_cnt->cnt_crc8_fail,
- falsealm_cnt->cnt_mcs_fail));
+ falsealm_cnt->cnt_mcs_fail);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- ("Cnt_Ofdm_fail = %x, " "Cnt_Cck_fail = %x, "
- "Cnt_all = %x\n",
+ "Cnt_Ofdm_fail = %x, Cnt_Cck_fail = %x, Cnt_all = %x\n",
falsealm_cnt->cnt_ofdm_fail,
falsealm_cnt->cnt_cck_fail,
- falsealm_cnt->cnt_all));
+ falsealm_cnt->cnt_all);
}
static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
@@ -275,7 +273,7 @@ static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
(rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
de_digtable.min_undecorated_pwdb_for_dm = 0;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- ("Not connected to any\n"));
+ "Not connected to any\n");
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_AP ||
@@ -283,25 +281,25 @@ static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
de_digtable.min_undecorated_pwdb_for_dm =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- ("AP Client PWDB = 0x%lx\n",
- rtlpriv->dm.UNDEC_SM_PWDB));
+ "AP Client PWDB = 0x%lx\n",
+ rtlpriv->dm.UNDEC_SM_PWDB);
} else {
de_digtable.min_undecorated_pwdb_for_dm =
rtlpriv->dm.undecorated_smoothed_pwdb;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- ("STA Default Port PWDB = 0x%x\n",
- de_digtable.min_undecorated_pwdb_for_dm));
+ "STA Default Port PWDB = 0x%x\n",
+ de_digtable.min_undecorated_pwdb_for_dm);
}
} else {
de_digtable.min_undecorated_pwdb_for_dm =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- ("AP Ext Port or disconnet PWDB = 0x%x\n",
- de_digtable.min_undecorated_pwdb_for_dm));
+ "AP Ext Port or disconnect PWDB = 0x%x\n",
+ de_digtable.min_undecorated_pwdb_for_dm);
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("MinUndecoratedPWDBForDM =%d\n",
- de_digtable.min_undecorated_pwdb_for_dm));
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
+ de_digtable.min_undecorated_pwdb_for_dm);
}
static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
@@ -340,14 +338,14 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
}
de_digtable.pre_cck_pd_state = de_digtable.cur_cck_pd_state;
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("CurSTAConnectState=%s\n",
- (de_digtable.cursta_connectctate == DIG_STA_CONNECT ?
- "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT")));
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("CCKPDStage=%s\n",
- (de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
- "Low RSSI " : "High RSSI ")));
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("is92d single phy =%x\n",
- IS_92D_SINGLEPHY(rtlpriv->rtlhal.version)));
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
+ de_digtable.cursta_connectctate == DIG_STA_CONNECT ?
+ "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
+ de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
+ "Low RSSI " : "High RSSI ");
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n",
+ IS_92D_SINGLEPHY(rtlpriv->rtlhal.version));
}
@@ -355,12 +353,12 @@ void rtl92d_dm_write_dig(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("cur_igvalue = 0x%x, "
- "pre_igvalue = 0x%x, backoff_val = %d\n",
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+ "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
de_digtable.cur_igvalue, de_digtable.pre_igvalue,
- de_digtable.backoff_val));
+ de_digtable.backoff_val);
if (de_digtable.dig_enable_flag == false) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("DIG is disabled\n"));
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
de_digtable.pre_igvalue = 0x17;
return;
}
@@ -377,22 +375,21 @@ static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
{
if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) &&
(rtlpriv->mac80211.vendor == PEER_CISCO)) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- ("IOT_PEER = CISCO\n"));
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
if (de_digtable.last_min_undecorated_pwdb_for_dm >= 50
&& de_digtable.min_undecorated_pwdb_for_dm < 50) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- ("Early Mode Off\n"));
+ "Early Mode Off\n");
} else if (de_digtable.last_min_undecorated_pwdb_for_dm <= 55 &&
de_digtable.min_undecorated_pwdb_for_dm > 55) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- ("Early Mode On\n"));
+ "Early Mode On\n");
}
} else if (!(rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL) & 0xf)) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("Early Mode On\n"));
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n");
}
}
@@ -402,13 +399,13 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
u8 value_igi = de_digtable.cur_igvalue;
struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("==>\n"));
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
if (rtlpriv->rtlhal.earlymode_enable) {
rtl92d_early_mode_enabled(rtlpriv);
de_digtable.last_min_undecorated_pwdb_for_dm =
de_digtable.min_undecorated_pwdb_for_dm;
}
- if (rtlpriv->dm.dm_initialgain_enable == false)
+ if (!rtlpriv->dm.dm_initialgain_enable)
return;
/* because we will send data pkt when scanning
@@ -421,7 +418,7 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
/* Not STA mode return tmp */
if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
return;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("progress\n"));
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
/* Decide the current status and if modify initial gain or not */
if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
de_digtable.cursta_connectctate = DIG_STA_CONNECT;
@@ -438,16 +435,16 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
else if (falsealm_cnt->cnt_all >= DM_DIG_FA_TH2)
value_igi += 2;
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- ("dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
- de_digtable.large_fa_hit, de_digtable.forbidden_igi));
+ "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
+ de_digtable.large_fa_hit, de_digtable.forbidden_igi);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- ("dm_DIG() Before: Recover_cnt=%d, rx_gain_range_min=%x\n",
- de_digtable.recover_cnt, de_digtable.rx_gain_range_min));
+ "dm_DIG() Before: Recover_cnt=%d, rx_gain_range_min=%x\n",
+ de_digtable.recover_cnt, de_digtable.rx_gain_range_min);
/* deal with abnormally large false alarm */
if (falsealm_cnt->cnt_all > 10000) {
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- ("dm_DIG(): Abnornally false alarm case.\n"));
+ "dm_DIG(): Abnormally false alarm case\n");
de_digtable.large_fa_hit++;
if (de_digtable.forbidden_igi < de_digtable.cur_igvalue) {
@@ -486,11 +483,11 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
}
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- ("dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
- de_digtable.large_fa_hit, de_digtable.forbidden_igi));
+ "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
+ de_digtable.large_fa_hit, de_digtable.forbidden_igi);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- ("dm_DIG() After: recover_cnt=%d, rx_gain_range_min=%x\n",
- de_digtable.recover_cnt, de_digtable.rx_gain_range_min));
+ "dm_DIG() After: recover_cnt=%d, rx_gain_range_min=%x\n",
+ de_digtable.recover_cnt, de_digtable.rx_gain_range_min);
if (value_igi > DM_DIG_MAX)
value_igi = DM_DIG_MAX;
@@ -500,7 +497,7 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
rtl92d_dm_write_dig(hw);
if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G)
rtl92d_dm_cck_packet_detection_thresh(hw);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("<<==\n"));
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "<<==\n");
}
static void rtl92d_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
@@ -528,7 +525,7 @@ static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- ("Not connected to any\n"));
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
return;
@@ -538,40 +535,40 @@ static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
undecorated_smoothed_pwdb =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("IBSS Client PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb));
+ "IBSS Client PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb);
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.undecorated_smoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("STA Default Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb));
+ "STA Default Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb);
}
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("AP Ext Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb));
+ "AP Ext Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb);
}
if (rtlhal->current_bandtype == BAND_ON_5G) {
if (undecorated_smoothed_pwdb >= 0x33) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL2;
RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
- ("5G:TxHighPwrLevel_Level2 (TxPwr=0x0)\n"));
+ "5G:TxHighPwrLevel_Level2 (TxPwr=0x0)\n");
} else if ((undecorated_smoothed_pwdb < 0x33)
&& (undecorated_smoothed_pwdb >= 0x2b)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
- ("5G:TxHighPwrLevel_Level1 (TxPwr=0x10)\n"));
+ "5G:TxHighPwrLevel_Level1 (TxPwr=0x10)\n");
} else if (undecorated_smoothed_pwdb < 0x2b) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
- ("5G:TxHighPwrLevel_Normal\n"));
+ "5G:TxHighPwrLevel_Normal\n");
}
} else {
if (undecorated_smoothed_pwdb >=
@@ -579,7 +576,7 @@ static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL2;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"));
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else
if ((undecorated_smoothed_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3))
@@ -589,19 +586,19 @@ static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"));
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undecorated_smoothed_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("TXHIGHPWRLEVEL_NORMAL\n"));
+ "TXHIGHPWRLEVEL_NORMAL\n");
}
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("PHY_SetTxPowerLevel8192S() Channel = %d\n",
- rtlphy->current_channel));
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtlphy->current_channel);
rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel);
}
rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
@@ -717,7 +714,7 @@ static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw)
u4tmp = (index_mapping[(rtlpriv->efuse.eeprom_thermalmeter -
rtlpriv->dm.thermalvalue_rxgain)]) << 12;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("===> Rx Gain %x\n", u4tmp));
+ "===> Rx Gain %x\n", u4tmp);
for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++)
rtl_set_rfreg(hw, i, 0x3C, BRFREGOFFSETMASK,
(rtlpriv->phy.reg_rf3c[i] & (~(0xF000))) | u4tmp);
@@ -741,27 +738,22 @@ static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
if (!memcmp((void *)&temp_cck,
(void *)&cckswing_table_ch14[i][2], 4)) {
*cck_index_old = (u8) i;
- RT_TRACE(rtlpriv,
- COMP_POWER_TRACKING,
- DBG_LOUD,
- ("Initial reg0x%x = 0x%lx, "
- "cck_index=0x%x, ch 14 %d\n",
- RCCK0_TXFILTER2,
- temp_cck, *cck_index_old,
- rtlpriv->dm.cck_inch14));
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch 14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ *cck_index_old,
+ rtlpriv->dm.cck_inch14);
break;
}
} else {
if (!memcmp((void *) &temp_cck,
&cckswing_table_ch1ch13[i][2], 4)) {
*cck_index_old = (u8) i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- ("Initial reg0x%x = 0x%lx, "
- "cck_index = 0x%x, ch14 %d\n",
- RCCK0_TXFILTER2,
- temp_cck, *cck_index_old,
- rtlpriv->dm.cck_inch14));
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ *cck_index_old,
+ rtlpriv->dm.cck_inch14);
break;
}
}
@@ -884,12 +876,12 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
};
rtlpriv->dm.txpower_trackinginit = true;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("\n"));
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "\n");
thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xf800);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
- "eeprom_thermalmeter 0x%x\n", thermalvalue,
- rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter));
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
+ thermalvalue,
+ rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter);
rtl92d_phy_ap_calibrate(hw, (thermalvalue -
rtlefuse->eeprom_thermalmeter));
if (is2t)
@@ -904,10 +896,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
ofdm_index_old[0] = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("Initial pathA ele_d reg0x%x = 0x%lx,"
- " ofdm_index=0x%x\n",
+ "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
ROFDM0_XATxIQIMBALANCE,
- ele_d, ofdm_index_old[0]));
+ ele_d, ofdm_index_old[0]);
break;
}
}
@@ -920,11 +911,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
ofdm_index_old[1] = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
DBG_LOUD,
- ("Initial pathB ele_d reg "
- "0x%x = 0x%lx, ofdm_index "
- "= 0x%x\n",
+ "Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n",
ROFDM0_XBTxIQIMBALANCE, ele_d,
- ofdm_index_old[1]));
+ ofdm_index_old[1]);
break;
}
}
@@ -952,7 +941,7 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
rtlpriv->dm.cck_index = cck_index_old;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("reload ofdm index for band switch\n"));
+ "reload ofdm index for band switch\n");
}
rtlpriv->dm.thermalvalue_avg
[rtlpriv->dm.thermalvalue_avg_index] = thermalvalue;
@@ -995,12 +984,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
(thermalvalue - rtlpriv->dm.thermalvalue_rxgain) :
(rtlpriv->dm.thermalvalue_rxgain - thermalvalue);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x"
- " eeprom_thermalmeter 0x%x delta 0x%x "
- "delta_lck 0x%x delta_iqk 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter, delta, delta_lck,
- delta_iqk));
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter, delta, delta_lck,
+ delta_iqk);
if ((delta_lck > rtlefuse->delta_lck) &&
(rtlefuse->delta_lck != 0)) {
rtlpriv->dm.thermalvalue_lck = thermalvalue;
@@ -1036,17 +1023,15 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
}
if (is2t) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("temp OFDM_A_index=0x%x, OFDM_B_index"
- " = 0x%x,cck_index=0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.ofdm_index[1],
- rtlpriv->dm.cck_index));
+ "temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x,cck_index=0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.ofdm_index[1],
+ rtlpriv->dm.cck_index);
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("temp OFDM_A_index=0x%x,cck_index = "
- "0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.cck_index));
+ "temp OFDM_A_index=0x%x,cck_index = 0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.cck_index);
}
for (i = 0; i < rf; i++) {
if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1)
@@ -1070,15 +1055,13 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
}
if (is2t) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("new OFDM_A_index=0x%x, OFDM_B_index "
- "= 0x%x, cck_index=0x%x\n",
+ "new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n",
ofdm_index[0], ofdm_index[1],
- cck_index));
+ cck_index);
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("new OFDM_A_index=0x%x,cck_index = "
- "0x%x\n",
- ofdm_index[0], cck_index));
+ "new OFDM_A_index=0x%x,cck_index = 0x%x\n",
+ ofdm_index[0], cck_index);
}
ele_d = (ofdmswing_table[(u8) ofdm_index[0]] &
0xFFC00000) >> 22;
@@ -1124,12 +1107,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("TxPwrTracking for interface %d path A: X ="
- " 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = "
- "0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = "
- "0x%lx\n", rtlhal->interfaceindex,
+ "TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n",
+ rtlhal->interfaceindex,
val_x, val_y, ele_a, ele_c, ele_d,
- val_x, val_y));
+ val_x, val_y);
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
/* Adjust CCK according to IQK result */
@@ -1232,20 +1213,16 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
BIT(28), 0x00);
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("TxPwrTracking path B: X = 0x%lx, "
- "Y = 0x%lx ele_A = 0x%lx ele_C = 0x"
- "%lx ele_D = 0x%lx 0xeb4 = 0x%lx "
- "0xebc = 0x%lx\n",
- val_x, val_y, ele_a, ele_c,
- ele_d, val_x, val_y));
+ "TxPwrTracking path B: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n",
+ val_x, val_y, ele_a, ele_c,
+ ele_d, val_x, val_y);
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("TxPwrTracking 0xc80 = 0x%x, 0xc94 = "
- "0x%x RF 0x24 = 0x%x\n",
+ "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
rtl_get_bbreg(hw, 0xc80, BMASKDWORD),
rtl_get_bbreg(hw, 0xc94, BMASKDWORD),
rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
- BRFREGOFFSETMASK)));
+ BRFREGOFFSETMASK));
}
if ((delta_iqk > rtlefuse->delta_iqk) &&
(rtlefuse->delta_iqk != 0)) {
@@ -1262,7 +1239,7 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
rtlpriv->dm.thermalvalue = thermalvalue;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n"));
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
}
static void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
@@ -1273,8 +1250,8 @@ static void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
rtlpriv->dm.txpower_trackinginit = false;
rtlpriv->dm.txpower_track_control = true;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("pMgntInfo->txpower_tracking = %d\n",
- rtlpriv->dm.txpower_tracking));
+ "pMgntInfo->txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking);
}
void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw)
@@ -1289,12 +1266,12 @@ void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw)
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) |
BIT(16), 0x03);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("Trigger 92S Thermal Meter!!\n"));
+ "Trigger 92S Thermal Meter!!\n");
tm_trigger = 1;
return;
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("Schedule TxPowerTracking direct call!!\n"));
+ "Schedule TxPowerTracking direct call!!\n");
rtl92d_dm_txpower_tracking_callback_thermalmeter(hw);
tm_trigger = 0;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
index 69354657f0f5..91030ec8ac3e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
index 82f060bdbc0b..f548a8d0068d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -124,14 +124,14 @@ static void _rtl92d_write_fw(struct ieee80211_hw *hw,
u32 pagenums, remainSize;
u32 page, offset;
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("FW size is %d bytes,\n", size));
+ RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE)
_rtl92d_fill_dummy(bufferPtr, &size);
pagenums = size / FW_8192D_PAGE_SIZE;
remainSize = size % FW_8192D_PAGE_SIZE;
if (pagenums > 8) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Page numbers should not greater then 8\n"));
+ "Page numbers should not greater then 8\n");
}
for (page = 0; page < pagenums; page++) {
offset = page * FW_8192D_PAGE_SIZE;
@@ -158,12 +158,12 @@ static int _rtl92d_fw_free_to_go(struct ieee80211_hw *hw)
(!(value32 & FWDL_ChkSum_rpt)));
if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("chksum report faill ! REG_MCUFWDL:0x%08x .\n",
- value32));
+ "chksum report faill ! REG_MCUFWDL:0x%08x\n",
+ value32);
return -EIO;
}
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- ("Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32));
+ "Checksum report OK ! REG_MCUFWDL:0x%08x\n", value32);
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
value32 |= MCUFWDL_RDY;
rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
@@ -186,9 +186,9 @@ void rtl92d_firmware_selfreset(struct ieee80211_hw *hw)
udelay(50);
u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
}
- RT_ASSERT((delay > 0), ("8051 reset failed!\n"));
+ RT_ASSERT((delay > 0), "8051 reset failed!\n");
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- ("=====> 8051 reset success (%d) .\n", delay));
+ "=====> 8051 reset success (%d)\n", delay);
}
static int _rtl92d_fw_init(struct ieee80211_hw *hw)
@@ -197,7 +197,7 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u32 counter;
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, ("FW already have download\n"));
+ RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "FW already have download\n");
/* polling for FW ready */
counter = 0;
do {
@@ -205,10 +205,9 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw)
if (rtl_read_byte(rtlpriv, FW_MAC0_READY) &
MAC0_READY) {
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- ("Polling FW ready success!! "
- "REG_MCUFWDL: 0x%x .\n",
+ "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
rtl_read_byte(rtlpriv,
- FW_MAC0_READY)));
+ FW_MAC0_READY));
return 0;
}
udelay(5);
@@ -216,10 +215,9 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw)
if (rtl_read_byte(rtlpriv, FW_MAC1_READY) &
MAC1_READY) {
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- ("Polling FW ready success!! "
- "REG_MCUFWDL: 0x%x .\n",
+ "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
rtl_read_byte(rtlpriv,
- FW_MAC1_READY)));
+ FW_MAC1_READY));
return 0;
}
udelay(5);
@@ -228,18 +226,16 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw)
if (rtlhal->interfaceindex == 0) {
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- ("Polling FW ready fail!! MAC0 FW init not ready: "
- "0x%x .\n",
- rtl_read_byte(rtlpriv, FW_MAC0_READY)));
+ "Polling FW ready fail!! MAC0 FW init not ready: 0x%x\n",
+ rtl_read_byte(rtlpriv, FW_MAC0_READY));
} else {
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- ("Polling FW ready fail!! MAC1 FW init not ready: "
- "0x%x .\n",
- rtl_read_byte(rtlpriv, FW_MAC1_READY)));
+ "Polling FW ready fail!! MAC1 FW init not ready: 0x%x\n",
+ rtl_read_byte(rtlpriv, FW_MAC1_READY));
}
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- ("Polling FW ready fail!! REG_MCUFWDL:0x%08ul .\n",
- rtl_read_dword(rtlpriv, REG_MCUFWDL)));
+ "Polling FW ready fail!! REG_MCUFWDL:0x%08ul\n",
+ rtl_read_dword(rtlpriv, REG_MCUFWDL));
return -1;
}
@@ -257,20 +253,20 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
bool fw_downloaded = false, fwdl_in_process = false;
unsigned long flags;
- if (!rtlhal->pfirmware)
+ if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware)
return 1;
fwsize = rtlhal->fwsize;
pfwheader = (u8 *) rtlhal->pfirmware;
pfwdata = (u8 *) rtlhal->pfirmware;
rtlhal->fw_version = (u16) GET_FIRMWARE_HDR_VERSION(pfwheader);
rtlhal->fw_subversion = (u16) GET_FIRMWARE_HDR_SUB_VER(pfwheader);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, (" FirmwareVersion(%d),"
- "FirmwareSubVersion(%d), Signature(%#x)\n",
- rtlhal->fw_version, rtlhal->fw_subversion,
- GET_FIRMWARE_HDR_SIGNATURE(pfwheader)));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "FirmwareVersion(%d), FirmwareSubVersion(%d), Signature(%#x)\n",
+ rtlhal->fw_version, rtlhal->fw_subversion,
+ GET_FIRMWARE_HDR_SIGNATURE(pfwheader));
if (IS_FW_HEADER_EXIST(pfwheader)) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("Shift 32 bytes for FW header!!\n"));
+ "Shift 32 bytes for FW header!!\n");
pfwdata = pfwdata + 32;
fwsize = fwsize - 32;
}
@@ -302,8 +298,7 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
break;
else
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- ("Wait for another mac "
- "download fw\n"));
+ "Wait for another mac download fw\n");
}
spin_lock_irqsave(&globalmutex_for_fwdownload, flags);
value = rtl_read_byte(rtlpriv, 0x1f);
@@ -337,11 +332,10 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("fw is not ready to run!\n"));
+ "fw is not ready to run!\n");
goto exit;
} else {
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- ("fw is ready to run!\n"));
+ RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "fw is ready to run!\n");
}
exit:
err = _rtl92d_fw_init(hw);
@@ -381,24 +375,24 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
if (ppsc->rfpwr_state == ERFOFF || ppsc->inactive_pwrstate == ERFOFF) {
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("Return as RF is off!!!\n"));
+ "Return as RF is off!!!\n");
return;
}
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("come in\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
if (rtlhal->h2c_setinprogress) {
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("H2C set in progress! Wait to set.."
- "element_id(%d).\n", element_id));
+ "H2C set in progress! Wait to set..element_id(%d)\n",
+ element_id);
while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("Wait 100 us (%d times)...\n",
- h2c_waitcounter));
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
udelay(100);
if (h2c_waitcounter > 1000)
@@ -418,8 +412,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
wait_writeh2c_limmit--;
if (wait_writeh2c_limmit == 0) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Write H2C fail because no trigger "
- "for FW INT!\n"));
+ "Write H2C fail because no trigger for FW INT!\n");
break;
}
boxnum = rtlhal->last_hmeboxnum;
@@ -442,7 +435,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
@@ -450,29 +443,29 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("Wating too long for FW read "
- "clear HMEBox(%d)!\n", boxnum));
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
+ boxnum);
break;
}
udelay(10);
isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("Wating for FW read clear HMEBox(%d)!!! "
- "0x1BF = %2x\n", boxnum, u1b_tmp));
+ "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
+ boxnum, u1b_tmp);
}
if (!isfw_read) {
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("Write H2C register BOX[%d] fail!!!!! "
- "Fw do not read.\n", boxnum));
+ "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
+ boxnum);
break;
}
memset(boxcontent, 0, sizeof(boxcontent));
memset(boxextcontent, 0, sizeof(boxextcontent));
boxcontent[0] = element_id;
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id));
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
switch (cmd_len) {
case 1:
boxcontent[0] &= ~(BIT(7));
@@ -519,7 +512,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
bwrite_sucess = true;
@@ -527,26 +520,20 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
if (rtlhal->last_hmeboxnum == 4)
rtlhal->last_hmeboxnum = 0;
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum));
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("go out\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw,
u8 element_id, u32 cmd_len, u8 *cmdbuffer)
{
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u32 tmp_cmdbuf[2];
- if (rtlhal->fw_ready == false) {
- RT_ASSERT(false, ("return H2C cmd because of Fw "
- "download fail!!!\n"));
- return;
- }
memset(tmp_cmdbuf, 0, 8);
memcpy(tmp_cmdbuf, cmdbuffer, cmd_len);
_rtl92d_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf);
@@ -559,13 +546,13 @@ void rtl92d_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
u8 u1_h2c_set_pwrmode[3] = { 0 };
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("FW LPS mode = %d\n", mode));
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1);
SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
ppsc->reg_max_lps_awakeintvl);
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
- "rtl92d_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n",
+ "rtl92d_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode",
u1_h2c_set_pwrmode, 3);
rtl92d_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
}
@@ -757,28 +744,32 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG);
totalpacketlen = TOTAL_RESERVED_PKT_LEN;
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
- "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+ "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL",
&reserved_page_packet[0], totalpacketlen);
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
- "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+ "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL",
u1RsvdPageLoc, 3);
skb = dev_alloc_skb(totalpacketlen);
- memcpy((u8 *) skb_put(skb, totalpacketlen), &reserved_page_packet,
- totalpacketlen);
- rtstatus = _rtl92d_cmd_send_packet(hw, skb);
+ if (!skb) {
+ dlok = false;
+ } else {
+ memcpy((u8 *) skb_put(skb, totalpacketlen),
+ &reserved_page_packet, totalpacketlen);
+ rtstatus = _rtl92d_cmd_send_packet(hw, skb);
- if (rtstatus)
- dlok = true;
+ if (rtstatus)
+ dlok = true;
+ }
if (dlok) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("Set RSVD page location to Fw.\n"));
+ "Set RSVD page location to Fw\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
- "H2C_RSVDPAGE:\n", u1RsvdPageLoc, 3);
+ "H2C_RSVDPAGE", u1RsvdPageLoc, 3);
rtl92d_fill_h2c_cmd(hw, H2C_RSVDPAGE,
sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
} else
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("Set RSVD page location to Fw FAIL!!!!!!.\n"));
+ "Set RSVD page location to Fw FAIL!!!!!!\n");
}
void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
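Beyond the trace-message cleanup, the rsvdpage hunk above adds a missing failure check: dev_alloc_skb() can return NULL, and the old code fed the result straight into skb_put()/memcpy(). A generic sketch of the guarded pattern follows; the helper name and the send callback are placeholders, only dev_alloc_skb()/skb_put()/memcpy() are real kernel APIs, and in the hunk the role of send() is played by _rtl92d_cmd_send_packet().

#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>

static bool example_send_blob(const u8 *blob, unsigned int len,
			      bool (*send)(struct sk_buff *skb))
{
	struct sk_buff *skb = dev_alloc_skb(len);

	if (!skb)		/* allocation failed: report and bail, as the hunk does */
		return false;

	memcpy(skb_put(skb, len), blob, len);
	return send(skb);	/* the skb is owned by the send path from here on */
}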
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
index 0c4d489eaa48..1ffacdda734c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index 9d89d7ccdafb..509f5af38adf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -166,7 +166,7 @@ void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
}
@@ -230,7 +230,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 e_aci;
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("HW_VAR_SLOT_TIME %x\n", val[0]));
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
for (e_aci = 0; e_aci < AC_MAX; e_aci++)
rtlpriv->cfg->ops->set_hw_reg(hw,
@@ -261,8 +261,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
min_spacing_to_set);
*val = min_spacing_to_set;
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg));
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
}
@@ -275,8 +275,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
mac->min_space_cfg |= (density_to_set << 3);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg));
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
break;
@@ -310,8 +310,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, regtoSet);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset));
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
break;
}
@@ -344,8 +344,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("HW_VAR_ACM_CTRL acm set "
- "failed: eACI is %d\n", acm));
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -361,13 +361,13 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
}
RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- ("SetHwReg8190pci(): [HW_VAR_ACM_CTRL] "
- "Write 0x%X\n", acm_ctrl));
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
break;
}
@@ -502,7 +502,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
}
@@ -522,8 +522,8 @@ static bool _rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
break;
if (count > POLLING_LLT_THRESHOLD) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Failed to polling write LLT done at "
- "address %d!\n", address));
+ "Failed to polling write LLT done at address %d!\n",
+ address);
status = false;
break;
}
@@ -707,7 +707,7 @@ static bool _rtl92de_init_mac(struct ieee80211_hw *hw)
/* System init */
/* 18. LLT_table_init(Adapter); */
- if (_rtl92de_llt_table_init(hw) == false)
+ if (!_rtl92de_llt_table_init(hw))
return false;
/* Clear interrupt and enable interrupt */
@@ -879,12 +879,12 @@ void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw)
u8 sec_reg_value;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm));
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("not open hw encryption\n"));
+ "not open hw encryption\n");
return;
}
sec_reg_value = SCR_TXENCENABLE | SCR_RXENCENABLE;
@@ -895,7 +895,7 @@ void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw)
sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("The SECR-value %x\n", sec_reg_value));
+ "The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
@@ -920,8 +920,8 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
rtl92d_phy_reset_iqk_result(hw);
/* rtlpriv->intf_ops->disable_aspm(hw); */
rtstatus = _rtl92de_init_mac(hw);
- if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Init MAC failed\n"));
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
err = 1;
spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags);
return err;
@@ -930,12 +930,8 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("Failed to download FW. Init HW "
- "without FW..\n"));
- rtlhal->fw_ready = false;
+ "Failed to download FW. Init HW without FW..\n");
return 1;
- } else {
- rtlhal->fw_ready = true;
}
rtlhal->last_hmeboxnum = 0;
rtlpriv->psc.fw_current_inpsmode = false;
@@ -946,7 +942,7 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
if (rtlhal->earlymode_enable) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("EarlyMode Enabled!!!\n"));
+ "EarlyMode Enabled!!!\n");
tmp_u1b = rtl_read_byte(rtlpriv, 0x4d0);
tmp_u1b = tmp_u1b | 0x1f;
@@ -1064,10 +1060,10 @@ static enum version_8192d _rtl92de_read_chip_version(struct ieee80211_hw *hw)
value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
if (!(value32 & 0x000f0000)) {
version = VERSION_TEST_CHIP_92D_SINGLEPHY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("TEST CHIP!!!\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "TEST CHIP!!!\n");
} else {
version = VERSION_NORMAL_CHIP_92D_SINGLEPHY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Normal CHIP!!!\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Normal CHIP!!!\n");
}
return version;
}
@@ -1092,8 +1088,8 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
_rtl92de_disable_bcn_sub_func(hw);
} else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("Set HW_VAR_MEDIA_STATUS: No such media "
- "status(%x).\n", type));
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x)\n",
+ type);
}
bcnfunc_enable = rtl_read_byte(rtlpriv, REG_BCN_CTRL);
switch (type) {
@@ -1102,30 +1098,30 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
ledaction = LED_CTL_LINK;
bcnfunc_enable &= 0xF7;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Set Network type to NO LINK!\n"));
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
bt_msr |= MSR_ADHOC;
bcnfunc_enable |= 0x08;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Set Network type to Ad Hoc!\n"));
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
bt_msr |= MSR_INFRA;
ledaction = LED_CTL_LINK;
bcnfunc_enable &= 0xF7;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Set Network type to STA!\n"));
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
bt_msr |= MSR_AP;
bcnfunc_enable |= 0x08;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Set Network type to AP!\n"));
+ "Set Network type to AP!\n");
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Network type %d not support!\n", type));
+ "Network type %d not supported!\n", type);
return 1;
break;
@@ -1151,7 +1147,7 @@ void rtl92de_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
_rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(4));
- } else if (check_bssid == false) {
+ } else if (!check_bssid) {
reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
_rtl92de_set_bcn_ctrl_reg(hw, BIT(4), 0);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
@@ -1189,7 +1185,7 @@ void rtl92d_linked_set_reg(struct ieee80211_hw *hw)
indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel);
if (!rtlphy->iqk_matrix_regsetting[indexforchannel].iqk_done) {
RT_TRACE(rtlpriv, COMP_SCAN | COMP_INIT, DBG_DMESG,
- ("Do IQK for channel:%d.\n", channel));
+ "Do IQK for channel:%d\n", channel);
rtl92d_phy_iq_calibrate(hw);
}
}
@@ -1214,7 +1210,7 @@ void rtl92de_set_qos(struct ieee80211_hw *hw, int aci)
rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
break;
default:
- RT_ASSERT(false, ("invalid aci: %d !\n", aci));
+ RT_ASSERT(false, "invalid aci: %d !\n", aci);
break;
}
}
@@ -1305,8 +1301,8 @@ static void _rtl92de_poweroff_adapter(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, 0x10);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("In PowerOff,reg0x%x=%X\n", REG_SPS0_CTRL,
- rtl_read_byte(rtlpriv, REG_SPS0_CTRL)));
+ "In PowerOff,reg0x%x=%X\n",
+ REG_SPS0_CTRL, rtl_read_byte(rtlpriv, REG_SPS0_CTRL));
/* r. Note: for PCIe interface, PON will not turn */
/* off m-bias and BandGap in PCIe suspend mode. */
@@ -1319,7 +1315,7 @@ static void _rtl92de_poweroff_adapter(struct ieee80211_hw *hw)
spin_unlock_irqrestore(&globalmutex_power, flags);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("<=======\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "<=======\n");
}
void rtl92de_card_disable(struct ieee80211_hw *hw)
@@ -1377,7 +1373,7 @@ void rtl92de_card_disable(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xff);
udelay(50);
rtl_write_byte(rtlpriv, REG_CR, 0x0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("==> Do power off.......\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "==> Do power off.......\n");
if (rtl92d_phy_check_poweroff(hw))
_rtl92de_poweroff_adapter(hw);
return;
@@ -1425,7 +1421,7 @@ void rtl92de_set_beacon_interval(struct ieee80211_hw *hw)
u16 bcn_interval = mac->beacon_interval;
RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- ("beacon_interval:%d\n", bcn_interval));
+ "beacon_interval:%d\n", bcn_interval);
/* rtl92de_disable_interrupt(hw); */
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
/* rtl92de_enable_interrupt(hw); */
@@ -1437,8 +1433,8 @@ void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- ("add_msr:%x, rm_msr:%x\n", add_msr, rm_msr));
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n",
+ add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
if (rm_msr)
@@ -1615,9 +1611,9 @@ static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw,
rtlefuse->internal_pa_5g[1] =
!((hwinfo[EEPROM_TSSI_B_5G] & BIT(6)) >> 6);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("Is D cut,Internal PA0 %d Internal PA1 %d\n",
+ "Is D cut,Internal PA0 %d Internal PA1 %d\n",
rtlefuse->internal_pa_5g[0],
- rtlefuse->internal_pa_5g[1]))
+ rtlefuse->internal_pa_5g[1]);
}
rtlefuse->eeprom_c9 = hwinfo[EEPROM_RF_OPT6];
rtlefuse->eeprom_cc = hwinfo[EEPROM_RF_OPT7];
@@ -1667,14 +1663,14 @@ static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw,
if (rtlefuse->eeprom_c9 == 0xFF)
rtlefuse->eeprom_c9 = 0x00;
RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- ("EEPROMRegulatory = 0x%x\n", rtlefuse->eeprom_regulatory));
+ "EEPROMRegulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- ("ThermalMeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
+ "ThermalMeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- ("CrystalCap = 0x%x\n", rtlefuse->crystalcap));
+ "CrystalCap = 0x%x\n", rtlefuse->crystalcap);
RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- ("Delta_IQK = 0x%x Delta_LCK = 0x%x\n", rtlefuse->delta_iqk,
- rtlefuse->delta_lck));
+ "Delta_IQK = 0x%x Delta_LCK = 0x%x\n",
+ rtlefuse->delta_iqk, rtlefuse->delta_lck);
for (rfPath = 0; rfPath < RF6052_MAX_PATH; rfPath++) {
for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
@@ -1710,11 +1706,11 @@ static void _rtl92de_read_macphymode_from_prom(struct ieee80211_hw *hw,
if (macphy_crvalue & BIT(3)) {
rtlhal->macphymode = SINGLEMAC_SINGLEPHY;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("MacPhyMode SINGLEMAC_SINGLEPHY\n"));
+ "MacPhyMode SINGLEMAC_SINGLEPHY\n");
} else {
rtlhal->macphymode = DUALMAC_DUALPHY;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("MacPhyMode DUALMAC_DUALPHY\n"));
+ "MacPhyMode DUALMAC_DUALPHY\n");
}
}
@@ -1741,15 +1737,15 @@ static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw)
switch (chipvalue) {
case 0xAA55:
chipver |= CHIP_92D_C_CUT;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("C-CUT!!!\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "C-CUT!!!\n");
break;
case 0x9966:
chipver |= CHIP_92D_D_CUT;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("D-CUT!!!\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n");
break;
default:
chipver |= CHIP_92D_D_CUT;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, ("Unkown CUT!\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unkown CUT!\n");
break;
}
rtlpriv->rtlhal.version = chipver;
@@ -1775,23 +1771,23 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
HWSET_MAX_SIZE);
} else if (rtlefuse->epromtype == EEPROM_93C46) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("RTL819X Not boot from eeprom, check it !!"));
+ "RTL819X Not boot from eeprom, check it !!\n");
}
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"),
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP",
hwinfo, HWSET_MAX_SIZE);
eeprom_id = *((u16 *)&hwinfo[0]);
if (eeprom_id != RTL8190_EEPROM_ID) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
+ "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
rtlefuse->autoload_failflag = true;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
}
if (rtlefuse->autoload_failflag) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("RTL819X Not boot from eeprom, check it !!"));
+ "RTL819X Not boot from eeprom, check it !!\n");
return;
}
rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
@@ -1802,16 +1798,15 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROMId = 0x%4x\n", eeprom_id);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("EEPROMId = 0x%4x\n", eeprom_id));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid));
+ "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did));
+ "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid));
+ "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid));
+ "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
/* Read Permanent MAC address */
if (rtlhal->interfaceindex == 0) {
@@ -1827,8 +1822,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
}
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR,
rtlefuse->dev_addr);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("%pM\n", rtlefuse->dev_addr));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
_rtl92de_read_txpower_info(hw, rtlefuse->autoload_failflag, hwinfo);
/* Read Channel Plan */
@@ -1849,7 +1843,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
rtlefuse->txpwr_fromeprom = true;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
+ "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
}
void rtl92de_read_eeprom_info(struct ieee80211_hw *hw)
@@ -1863,19 +1857,19 @@ void rtl92de_read_eeprom_info(struct ieee80211_hw *hw)
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
rtlefuse->autoload_status = tmp_u1b;
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from EEPROM\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from EFUSE\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl92de_read_adapter_info(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Autoload ERR!!\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
}
return;
}
@@ -1958,8 +1952,8 @@ static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw,
(shortgi_rate << 4) | (shortgi_rate);
}
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- ("%x\n", rtl_read_dword(rtlpriv, REG_ARFR0)));
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
+ rtl_read_dword(rtlpriv, REG_ARFR0));
}
static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
@@ -2092,8 +2086,8 @@ static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
value[0] = (ratr_bitmap & 0x0fffffff) | (ratr_index << 28);
value[1] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- ("ratr_bitmap :%x value0:%x value1:%x\n",
- ratr_bitmap, value[0], value[1]));
+ "ratr_bitmap :%x value0:%x value1:%x\n",
+ ratr_bitmap, value[0], value[1]);
rtl92d_fill_h2c_cmd(hw, H2C_RA_MASK, 5, (u8 *) value);
if (macid != 0)
sta_entry->ratr_index = ratr_index;
@@ -2153,14 +2147,13 @@ bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
if (ppsc->hwradiooff && (e_rfpowerstate_toset == ERFON)) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("GPIOChangeRF - HW Radio ON, RF ON\n"));
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
e_rfpowerstate_toset = ERFON;
ppsc->hwradiooff = false;
actuallyset = true;
- } else if ((ppsc->hwradiooff == false)
- && (e_rfpowerstate_toset == ERFOFF)) {
+ } else if (!ppsc->hwradiooff && (e_rfpowerstate_toset == ERFOFF)) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("GPIOChangeRF - HW Radio OFF, RF OFF\n"));
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
e_rfpowerstate_toset = ERFOFF;
ppsc->hwradiooff = true;
actuallyset = true;
@@ -2204,7 +2197,7 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 idx;
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("clear_all\n"));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
rtl_cam_empty_entry(hw, cam_offset + idx);
@@ -2230,8 +2223,8 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
enc_algo = CAM_AES;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case "
- "not process\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not processed\n");
enc_algo = CAM_TKIP;
break;
}
@@ -2248,9 +2241,8 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
p_macaddr);
if (entry_id >= TOTAL_CAM_ENTRY) {
RT_TRACE(rtlpriv, COMP_SEC,
- DBG_EMERG, ("Can not "
- "find free hw security"
- " cam entry\n"));
+ DBG_EMERG,
+ "Can not find free hw security cam entry\n");
return;
}
} else {
@@ -2262,29 +2254,29 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
}
if (rtlpriv->sec.key_len[key_index] == 0) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("delete one entry, entry_id is %d\n",
- entry_id));
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("The insert KEY length is %d\n",
- rtlpriv->sec.key_len[PAIRWISE_KEYIDX]));
+ "The insert KEY length is %d\n",
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("The insert KEY is %x %x\n",
- rtlpriv->sec.key_buf[0][0],
- rtlpriv->sec.key_buf[0][1]));
+ "The insert KEY is %x %x\n",
+ rtlpriv->sec.key_buf[0][0],
+ rtlpriv->sec.key_buf[0][1]);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("add one entry\n"));
+ "add one entry\n");
if (is_pairwise) {
RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
- "Pairwiase Key content :",
+ "Pairwise Key content",
rtlpriv->sec.pairwise_key,
rtlpriv->
sec.key_len[PAIRWISE_KEYIDX]);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("set Pairwiase key\n"));
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
@@ -2292,7 +2284,7 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
sec.key_buf[key_index]);
} else {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("set group key\n"));
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
rtlefuse->dev_addr,
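
Most of the hw.c hunks above apply the same mechanical change: RT_TRACE's format string and arguments lose their extra wrapping parentheses and are passed straight through as variadic arguments. A minimal stand-alone sketch of the two calling styles follows; the macro body here is only an illustrative stand-in, not the actual rtlwifi debug header.

	#include <stdio.h>

	/* Hypothetical stand-in for the trace macro after the conversion:
	 * the format string and its arguments are ordinary variadic parameters. */
	#define RT_TRACE_SKETCH(fmt, ...) printf(fmt, ##__VA_ARGS__)

	int main(void)
	{
		int add_msr = 0x10, rm_msr = 0x01;

		/* Old style wrapped everything in one extra pair of parentheses:
		 *   RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
		 *            ("add_msr:%x, rm_msr:%x\n", add_msr, rm_msr));
		 * New style passes the format string directly: */
		RT_TRACE_SKETCH("add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
		return 0;
	}
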
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
index ad44ffa520e6..7c9f7a2f1e42 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/rtlwifi/rtl8192de/led.c
index f1552f4df658..76a57ae4af3e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/led.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -45,8 +45,8 @@ void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -71,7 +71,7 @@ void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
pled->ledon = true;
@@ -83,8 +83,8 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
@@ -106,7 +106,7 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
pled->ledon = false;
@@ -153,7 +153,7 @@ void rtl92de_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d,\n", ledaction));
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction);
_rtl92ce_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/led.h b/drivers/net/wireless/rtlwifi/rtl8192de/led.h
index 57f4a3c583d4..a29df30c3025 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/led.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 0883349e1c83..34591eeb8376 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -204,8 +204,8 @@ u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
u32 returnvalue, originalvalue, bitshift;
u8 dbi_direct;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
- "bitmask(%#x)\n", regaddr, bitmask));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
+ regaddr, bitmask);
if (rtlhal->during_mac1init_radioa || rtlhal->during_mac0init_radiob) {
/* mac1 use phy0 read radio_b. */
/* mac0 use phy1 read radio_b. */
@@ -220,8 +220,9 @@ u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
}
bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
- "Addr[0x%x]=0x%x\n", bitmask, regaddr, originalvalue));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+ bitmask, regaddr, originalvalue);
return returnvalue;
}
@@ -233,8 +234,9 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
u8 dbi_direct = 0;
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
- " data(%#x)\n", regaddr, bitmask, data));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
if (rtlhal->during_mac1init_radioa)
dbi_direct = BIT(3);
else if (rtlhal->during_mac0init_radiob)
@@ -255,8 +257,9 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
rtl92de_write_dword_dbi(hw, (u16) regaddr, data, dbi_direct);
else
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
- " data(%#x)\n", regaddr, bitmask, data));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
@@ -300,8 +303,8 @@ static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
else
retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
BLSSIREADBACKDATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x] = 0x%x\n",
- rfpath, pphyreg->rflssi_readback, retvalue));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x] = 0x%x\n",
+ rfpath, pphyreg->rflssi_readback, retvalue);
return retvalue;
}
@@ -319,8 +322,8 @@ static void _rtl92d_phy_rf_serial_write(struct ieee80211_hw *hw,
/* T65 RF */
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, BMASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset, data_and_addr));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset, data_and_addr);
}
u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
@@ -330,17 +333,17 @@ u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
u32 original_value, readback_value, bitshift;
unsigned long flags;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
- "rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr);
bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), rfpath(%#x), "
- "bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -353,8 +356,8 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath));
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
if (bitmask == 0)
return;
spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
@@ -369,9 +372,9 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
_rtl92d_phy_rf_serial_write(hw, rfpath, regaddr, data);
}
spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
- "bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
}
bool rtl92d_phy_mac_config(struct ieee80211_hw *hw)
@@ -381,10 +384,10 @@ bool rtl92d_phy_mac_config(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Read Rtl819XMACPHY_Array\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n");
arraylength = MAC_2T_ARRAYLENGTH;
ptrarray = rtl8192de_mac_2tarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Img:Rtl819XMAC_Array\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Img:Rtl819XMAC_Array\n");
for (i = 0; i < arraylength; i = i + 2)
rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY) {
@@ -561,25 +564,25 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
agctab_arraylen = AGCTAB_ARRAYLENGTH;
agctab_array_table = rtl8192de_agctab_array;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- (" ===> phy:MAC0, Rtl819XAGCTAB_Array\n"));
+ " ===> phy:MAC0, Rtl819XAGCTAB_Array\n");
} else {
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
agctab_arraylen = AGCTAB_2G_ARRAYLENGTH;
agctab_array_table = rtl8192de_agctab_2garray;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- (" ===> phy:MAC1, Rtl819XAGCTAB_2GArray\n"));
+ " ===> phy:MAC1, Rtl819XAGCTAB_2GArray\n");
} else {
agctab_5garraylen = AGCTAB_5G_ARRAYLENGTH;
agctab_5garray_table = rtl8192de_agctab_5garray;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- (" ===> phy:MAC1, Rtl819XAGCTAB_5GArray\n"));
+ " ===> phy:MAC1, Rtl819XAGCTAB_5GArray\n");
}
}
phy_reg_arraylen = PHY_REG_2T_ARRAYLENGTH;
phy_regarray_table = rtl8192de_phy_reg_2tarray;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- (" ===> phy:Rtl819XPHY_REG_Array_PG\n"));
+ " ===> phy:Rtl819XPHY_REG_Array_PG\n");
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_reg_arraylen; i = i + 2) {
if (phy_regarray_table[i] == 0xfe)
@@ -598,10 +601,9 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
phy_regarray_table[i + 1]);
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("The phy_regarray_table[0] is %x"
- " Rtl819XPHY_REGArray[1] is %x\n",
- phy_regarray_table[i],
- phy_regarray_table[i + 1]));
+ "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]);
}
} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
if (rtlhal->interfaceindex == 0) {
@@ -613,15 +615,12 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
* setting. */
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("The Rtl819XAGCTAB_Array_"
- "Table[0] is %ul "
- "Rtl819XPHY_REGArray[1] is %ul\n",
+ "The Rtl819XAGCTAB_Array_Table[0] is %ul Rtl819XPHY_REGArray[1] is %ul\n",
agctab_array_table[i],
- agctab_array_table[i + 1]));
+ agctab_array_table[i + 1]);
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("Normal Chip, MAC0, load "
- "Rtl819XAGCTAB_Array\n"));
+ "Normal Chip, MAC0, load Rtl819XAGCTAB_Array\n");
} else {
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
for (i = 0; i < agctab_arraylen; i = i + 2) {
@@ -632,14 +631,12 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
* setting. */
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("The Rtl819XAGCTAB_Array_"
- "Table[0] is %ul Rtl819XPHY_"
- "REGArray[1] is %ul\n",
+ "The Rtl819XAGCTAB_Array_Table[0] is %ul Rtl819XPHY_REGArray[1] is %ul\n",
agctab_array_table[i],
- agctab_array_table[i + 1]));
+ agctab_array_table[i + 1]);
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("Load Rtl819XAGCTAB_2GArray\n"));
+ "Load Rtl819XAGCTAB_2GArray\n");
} else {
for (i = 0; i < agctab_5garraylen; i = i + 2) {
rtl_set_bbreg(hw,
@@ -650,14 +647,12 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
* setting. */
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("The Rtl819XAGCTAB_5GArray_"
- "Table[0] is %ul Rtl819XPHY_"
- "REGArray[1] is %ul\n",
+ "The Rtl819XAGCTAB_5GArray_Table[0] is %ul Rtl819XPHY_REGArray[1] is %ul\n",
agctab_5garray_table[i],
- agctab_5garray_table[i + 1]));
+ agctab_5garray_table[i + 1]);
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("Load Rtl819XAGCTAB_5GArray\n"));
+ "Load Rtl819XAGCTAB_5GArray\n");
}
}
}
@@ -670,152 +665,51 @@ static void _rtl92d_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ int index;
+
+ if (regaddr == RTXAGC_A_RATE18_06)
+ index = 0;
+ else if (regaddr == RTXAGC_A_RATE54_24)
+ index = 1;
+ else if (regaddr == RTXAGC_A_CCK1_MCS32)
+ index = 6;
+ else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00)
+ index = 7;
+ else if (regaddr == RTXAGC_A_MCS03_MCS00)
+ index = 2;
+ else if (regaddr == RTXAGC_A_MCS07_MCS04)
+ index = 3;
+ else if (regaddr == RTXAGC_A_MCS11_MCS08)
+ index = 4;
+ else if (regaddr == RTXAGC_A_MCS15_MCS12)
+ index = 5;
+ else if (regaddr == RTXAGC_B_RATE18_06)
+ index = 8;
+ else if (regaddr == RTXAGC_B_RATE54_24)
+ index = 9;
+ else if (regaddr == RTXAGC_B_CCK1_55_MCS32)
+ index = 14;
+ else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff)
+ index = 15;
+ else if (regaddr == RTXAGC_B_MCS03_MCS00)
+ index = 10;
+ else if (regaddr == RTXAGC_B_MCS07_MCS04)
+ index = 11;
+ else if (regaddr == RTXAGC_B_MCS11_MCS08)
+ index = 12;
+ else if (regaddr == RTXAGC_B_MCS15_MCS12)
+ index = 13;
+ else
+ return;
- if (regaddr == RTXAGC_A_RATE18_06) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%ulx\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][0]));
- }
- if (regaddr == RTXAGC_A_RATE54_24) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%ulx\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][1]));
- }
- if (regaddr == RTXAGC_A_CCK1_MCS32) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%ulx\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][6]));
- }
- if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][7] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%ulx\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][7]));
- }
- if (regaddr == RTXAGC_A_MCS03_MCS00) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%ulx\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][2]));
- }
- if (regaddr == RTXAGC_A_MCS07_MCS04) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%ulx\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][3]));
- }
- if (regaddr == RTXAGC_A_MCS11_MCS08) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%ulx\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][4]));
- }
- if (regaddr == RTXAGC_A_MCS15_MCS12) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%ulx\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][5]));
- }
- if (regaddr == RTXAGC_B_RATE18_06) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][8] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%ulx\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][8]));
- }
- if (regaddr == RTXAGC_B_RATE54_24) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][9] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%ulx\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][9]));
- }
- if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][14] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%ulx\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][14]));
- }
- if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][15] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%ulx\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][15]));
- }
- if (regaddr == RTXAGC_B_MCS03_MCS00) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][10] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%ulx\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][10]));
- }
- if (regaddr == RTXAGC_B_MCS07_MCS04) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][11] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%ulx\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][11]));
- }
- if (regaddr == RTXAGC_B_MCS11_MCS08) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][12] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%ulx\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][12]));
- }
- if (regaddr == RTXAGC_B_MCS15_MCS12) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][13] =
- data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%ulx\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][13]));
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][index] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%ulx\n",
+ rtlphy->pwrgroup_cnt, index,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][index]);
+ if (index == 13)
rtlphy->pwrgroup_cnt++;
- }
}
static bool _rtl92d_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
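
The _rtl92d_store_pwrindex_diffrate_offset hunk above collapses sixteen near-identical if blocks into one regaddr-to-index mapping followed by a single store and a single trace. A stand-alone sketch of that shape, with made-up register values standing in for the driver's RTXAGC_* addresses:

	#include <stdio.h>

	/* Illustrative register addresses only; the driver maps RTXAGC_* registers. */
	enum { REG_RATE18_06 = 0xe00, REG_RATE54_24 = 0xe04 };

	static int regaddr_to_index(unsigned int regaddr)
	{
		if (regaddr == REG_RATE18_06)
			return 0;
		if (regaddr == REG_RATE54_24)
			return 1;
		return -1;	/* unknown register: caller stores nothing */
	}

	int main(void)
	{
		unsigned int offset[16] = { 0 };
		int idx = regaddr_to_index(REG_RATE54_24);

		if (idx >= 0) {
			offset[idx] = 0x32323232;	/* one store replaces sixteen branches */
			printf("offset[%d] = 0x%x\n", idx, offset[idx]);
		}
		return 0;
	}
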
@@ -849,7 +743,7 @@ static bool _rtl92d_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
}
} else {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- ("configtype != BaseBand_Config_PHY_REG\n"));
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -861,17 +755,17 @@ static bool _rtl92d_phy_bb_config(struct ieee80211_hw *hw)
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
bool rtstatus = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "==>\n");
rtstatus = _rtl92d_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
- if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
return false;
}
/* if (rtlphy->rf_type == RF_1T2R) {
* _rtl92c_phy_bb_config_1t(hw);
- * RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n"));
+ * RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n");
*} */
if (rtlefuse->autoload_failflag == false) {
@@ -879,14 +773,14 @@ static bool _rtl92d_phy_bb_config(struct ieee80211_hw *hw)
rtstatus = _rtl92d_phy_config_bb_with_pgheaderfile(hw,
BASEBAND_CONFIG_PHY_REG);
}
- if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
return false;
}
rtstatus = _rtl92d_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_AGC_TAB);
- if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
return false;
}
rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
@@ -951,19 +845,17 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
radiob_array_table = rtl8192de_radiob_2t_int_paarray;
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("PHY_ConfigRFWithHeaderFile() "
- "Radio_A:Rtl819XRadioA_1TArray\n"));
+ "PHY_ConfigRFWithHeaderFile() Radio_A:Rtl819XRadioA_1TArray\n");
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("PHY_ConfigRFWithHeaderFile() "
- "Radio_B:Rtl819XRadioB_1TArray\n"));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Radio No %x\n", rfpath));
+ "PHY_ConfigRFWithHeaderFile() Radio_B:Rtl819XRadioB_1TArray\n");
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
/* this only happens when DMDP, mac0 start on 2.4G,
* mac1 start on 5G, mac 0 has to set phy0&phy1
* pathA or mac1 has to set phy0&phy1 pathA */
if ((content == radiob_txt) && (rfpath == RF90_PATH_A)) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- (" ===> althougth Path A, we load radiob.txt\n"));
+ " ===> althougth Path A, we load radiob.txt\n");
radioa_arraylen = radiob_arraylen;
radioa_array_table = radiob_array_table;
}
@@ -1022,11 +914,11 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
break;
case RF90_PATH_C:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
return true;
@@ -1046,19 +938,18 @@ void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
rtlphy->default_initialgain[3] =
(u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, BMASKBYTE0);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Default initial gain (c50=0x%x, "
- "c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]));
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
BMASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
BMASKDWORD);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync));
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
static void _rtl92d_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
@@ -1137,7 +1028,7 @@ void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 cckpowerlevel[2], ofdmpowerlevel[2];
- if (rtlefuse->txpwr_fromeprom == false)
+ if (!rtlefuse->txpwr_fromeprom)
return;
channel = _rtl92c_phy_get_rightchnlplace(channel);
_rtl92d_get_txpower_index(hw, channel, &cckpowerlevel[0],
@@ -1172,7 +1063,7 @@ void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Unknown Scan Backup operation.\n"));
+ "Unknown Scan Backup operation\n");
break;
}
}
@@ -1193,14 +1084,13 @@ void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
return;
if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("FALSE driver sleep or unload\n"));
+ "FALSE driver sleep or unload\n");
return;
}
rtlphy->set_bwmode_inprogress = true;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- ("Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz"));
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
switch (rtlphy->current_chan_bw) {
@@ -1218,7 +1108,7 @@ void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
break;
}
switch (rtlphy->current_chan_bw) {
@@ -1250,13 +1140,13 @@ void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
break;
}
rtl92d_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
}
static void _rtl92d_phy_stop_trx_before_changeband(struct ieee80211_hw *hw)
@@ -1273,7 +1163,7 @@ static void rtl92d_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 value8;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("==>\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "==>\n");
rtlhal->bandset = band;
rtlhal->current_bandtype = band;
if (IS_92D_SINGLEPHY(rtlhal->version))
@@ -1283,13 +1173,13 @@ static void rtl92d_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
/* reconfig BB/RF according to wireless mode */
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
/* BB & RF Config */
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, ("====>2.4G\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "====>2.4G\n");
if (rtlhal->interfaceindex == 1)
_rtl92d_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_AGC_TAB);
} else {
/* 5G band */
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, ("====>5G\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "====>5G\n");
if (rtlhal->interfaceindex == 1)
_rtl92d_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_AGC_TAB);
@@ -1317,7 +1207,7 @@ static void rtl92d_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
0 ? REG_MAC0 : REG_MAC1), value8);
}
mdelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("<==Switch Band OK.\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "<==Switch Band OK\n");
}
static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
@@ -1329,9 +1219,9 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
u8 group, i;
unsigned long flag = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("====>path %d\n", rfpath));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>path %d\n", rfpath);
if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("====>5G\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>5G\n");
rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(25) | BIT(24), 0);
rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
/* fc area 0xd2c */
@@ -1353,14 +1243,13 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
} else {
/* G band. */
RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- ("Load RF IMR parameters for G band. IMR already "
- "setting %d\n",
- rtlpriv->rtlhal.load_imrandiqk_setting_for2g));
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("====>2.4G\n"));
+ "Load RF IMR parameters for G band. IMR already setting %d\n",
+ rtlpriv->rtlhal.load_imrandiqk_setting_for2g);
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>2.4G\n");
if (!rtlpriv->rtlhal.load_imrandiqk_setting_for2g) {
RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- ("Load RF IMR parameters "
- "for G band. %d\n", rfpath));
+ "Load RF IMR parameters for G band. %d\n",
+ rfpath);
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(25) | BIT(24), 0);
rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4,
@@ -1378,7 +1267,7 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
}
}
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("<====\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
static void _rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw,
@@ -1388,7 +1277,7 @@ static void _rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("====>\n"));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
/*----Store original RFENV control type----*/
switch (rfpath) {
case RF90_PATH_A:
@@ -1414,7 +1303,7 @@ static void _rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw,
/*Set 0 to 12 bits for 8255 */
rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
udelay(1);
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("<====\n"));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
}
static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
@@ -1424,7 +1313,7 @@ static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("=====>\n"));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n");
/*----Restore RFENV control type----*/ ;
switch (rfpath) {
case RF90_PATH_A:
@@ -1437,7 +1326,7 @@ static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
*pu4_regval);
break;
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("<=====\n"));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "<=====\n");
}
static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
@@ -1451,13 +1340,13 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
bool need_pwr_down = false, internal_pa = false;
u32 u4regvalue, mask = 0x1C000, value = 0, u4tmp, u4tmp2;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("====>\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>\n");
/* config path A for 5G */
if (rtlhal->current_bandtype == BAND_ON_5G) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("====>5G\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>5G\n");
u4tmp = curveindex_5g[channel - 1];
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("ver 1 set RF-A, 5G, "
- "0x28 = 0x%x !!\n", u4tmp));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "ver 1 set RF-A, 5G, 0x28 = 0x%x !!\n", u4tmp);
for (i = 0; i < RF_CHNL_NUM_5G; i++) {
if (channel == rf_chnl_5g[i] && channel <= 140)
index = 0;
@@ -1503,12 +1392,13 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
rf_reg_pram_c_5g[index][i]);
}
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- ("offset 0x%x value 0x%x "
- "path %d index %d readback 0x%x\n",
- rf_reg_for_c_cut_5g[i],
- rf_reg_pram_c_5g[index][i], path,
- index, rtl_get_rfreg(hw, (enum radio_path)path,
- rf_reg_for_c_cut_5g[i], BRFREGOFFSETMASK)));
+ "offset 0x%x value 0x%x path %d index %d readback 0x%x\n",
+ rf_reg_for_c_cut_5g[i],
+ rf_reg_pram_c_5g[index][i],
+ path, index,
+ rtl_get_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ BRFREGOFFSETMASK));
}
if (need_pwr_down)
_rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
@@ -1541,11 +1431,10 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
BRFREGOFFSETMASK,
rf_pram_c_5g_int_pa[index][i]);
RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- ("offset 0x%x value 0x%x "
- "path %d index %d\n",
+ "offset 0x%x value 0x%x path %d index %d\n",
rf_for_c_cut_5g_internal_pa[i],
rf_pram_c_5g_int_pa[index][i],
- rfpath, index));
+ rfpath, index);
}
} else {
rtl_set_rfreg(hw, (enum radio_path)rfpath, 0x0B,
@@ -1553,10 +1442,10 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
}
}
} else if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("====>2.4G\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>2.4G\n");
u4tmp = curveindex_2g[channel - 1];
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("ver 3 set RF-B, 2G, "
- "0x28 = 0x%x !!\n", u4tmp));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n", u4tmp);
if (channel == 1 || channel == 2 || channel == 4 || channel == 9
|| channel == 10 || channel == 11 || channel == 12)
index = 0;
@@ -1590,18 +1479,17 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
rf_reg_param_for_c_cut_2g
[index][i]);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- ("offset 0x%x value 0x%x mak 0x%x path %d "
- "index %d readback 0x%x\n",
- rf_reg_for_c_cut_2g[i],
- rf_reg_param_for_c_cut_2g[index][i],
- rf_reg_mask_for_c_cut_2g[i], path, index,
- rtl_get_rfreg(hw, (enum radio_path)path,
- rf_reg_for_c_cut_2g[i],
- BRFREGOFFSETMASK)));
+ "offset 0x%x value 0x%x mak 0x%x path %d index %d readback 0x%x\n",
+ rf_reg_for_c_cut_2g[i],
+ rf_reg_param_for_c_cut_2g[index][i],
+ rf_reg_mask_for_c_cut_2g[i], path, index,
+ rtl_get_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_2g[i],
+ BRFREGOFFSETMASK));
}
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("cosa ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n",
- rf_syn_g4_for_c_cut_2g | (u4tmp << 11)));
+ "cosa ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n",
+ rf_syn_g4_for_c_cut_2g | (u4tmp << 11));
rtl_set_rfreg(hw, (enum radio_path)path, RF_SYN_G4,
BRFREGOFFSETMASK,
@@ -1611,7 +1499,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
if (rtlhal->during_mac0init_radiob)
rtl92d_phy_powerdown_anotherphy(hw, true);
}
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("<====\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl)
@@ -1648,9 +1536,9 @@ static u8 _rtl92d_phy_patha_iqk(struct ieee80211_hw *hw, bool configpathb)
u32 regeac, rege94, rege9c, regea4;
u8 result = 0;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path A IQK!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK!\n");
/* path-A IQK setting */
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path-A IQK setting!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n");
if (rtlhal->interfaceindex == 0) {
rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x10008c1f);
rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x10008c1f);
@@ -1668,26 +1556,26 @@ static u8 _rtl92d_phy_patha_iqk(struct ieee80211_hw *hw, bool configpathb)
rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x28160206);
}
/* LO calibration setting */
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("LO calibration setting!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911);
/* One shot, path A LOK & IQK */
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("One shot, path A LOK & IQK!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "One shot, path A LOK & IQK!\n");
rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf9000000);
rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000);
/* delay x ms */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Delay %d ms for One shot, path A LOK & IQK.\n",
- IQK_DELAY_TIME));
+ "Delay %d ms for One shot, path A LOK & IQK\n",
+ IQK_DELAY_TIME);
mdelay(IQK_DELAY_TIME);
/* Check failed */
regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xeac = 0x%x\n", regeac));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
rege94 = rtl_get_bbreg(hw, 0xe94, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xe94 = 0x%x\n", rege94));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe94 = 0x%x\n", rege94);
rege9c = rtl_get_bbreg(hw, 0xe9c, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xe9c = 0x%x\n", rege9c));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c);
regea4 = rtl_get_bbreg(hw, 0xea4, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xea4 = 0x%x\n", regea4));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4);
if (!(regeac & BIT(28)) && (((rege94 & 0x03FF0000) >> 16) != 0x142) &&
(((rege9c & 0x03FF0000) >> 16) != 0x42))
result |= 0x01;
@@ -1698,7 +1586,7 @@ static u8 _rtl92d_phy_patha_iqk(struct ieee80211_hw *hw, bool configpathb)
(((regeac & 0x03FF0000) >> 16) != 0x36))
result |= 0x02;
else
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path A Rx IQK fail!!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A Rx IQK fail!!\n");
return result;
}
@@ -1719,9 +1607,9 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
TxOKBit = BIT(31);
RxOKBit = BIT(30);
}
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path A IQK!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK!\n");
/* path-A IQK setting */
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path-A IQK setting!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n");
rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x18008c1f);
rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x18008c1f);
rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82140307);
@@ -1734,7 +1622,7 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x68110000);
}
/* LO calibration setting */
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("LO calibration setting!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911);
/* path-A PA on */
rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD, 0x07000f60);
@@ -1742,29 +1630,29 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
for (i = 0; i < retrycount; i++) {
/* One shot, path A LOK & IQK */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("One shot, path A LOK & IQK!\n"));
+ "One shot, path A LOK & IQK!\n");
rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf9000000);
rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000);
/* delay x ms */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Delay %d ms for One shot, path A LOK & IQK.\n",
- IQK_DELAY_TIME));
+ "Delay %d ms for One shot, path A LOK & IQK.\n",
+ IQK_DELAY_TIME);
mdelay(IQK_DELAY_TIME * 10);
/* Check failed */
regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xeac = 0x%x\n", regeac));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
rege94 = rtl_get_bbreg(hw, 0xe94, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xe94 = 0x%x\n", rege94));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe94 = 0x%x\n", rege94);
rege9c = rtl_get_bbreg(hw, 0xe9c, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xe9c = 0x%x\n", rege9c));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c);
regea4 = rtl_get_bbreg(hw, 0xea4, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xea4 = 0x%x\n", regea4));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4);
if (!(regeac & TxOKBit) &&
(((rege94 & 0x03FF0000) >> 16) != 0x142)) {
result |= 0x01;
} else { /* if Tx not OK, ignore Rx */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Path A Tx IQK fail!!\n"));
+ "Path A Tx IQK fail!!\n");
continue;
}
@@ -1775,7 +1663,7 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
break;
} else {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Path A Rx IQK fail!!\n"));
+ "Path A Rx IQK fail!!\n");
}
}
/* path A PA off */
@@ -1793,27 +1681,26 @@ static u8 _rtl92d_phy_pathb_iqk(struct ieee80211_hw *hw)
u32 regeac, regeb4, regebc, regec4, regecc;
u8 result = 0;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path B IQK!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B IQK!\n");
/* One shot, path B LOK & IQK */
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("One shot, path A LOK & IQK!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "One shot, path A LOK & IQK!\n");
rtl_set_bbreg(hw, 0xe60, BMASKDWORD, 0x00000002);
rtl_set_bbreg(hw, 0xe60, BMASKDWORD, 0x00000000);
/* delay x ms */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Delay %d ms for One shot, path B LOK & IQK.\n",
- IQK_DELAY_TIME));
+ "Delay %d ms for One shot, path B LOK & IQK\n", IQK_DELAY_TIME);
mdelay(IQK_DELAY_TIME);
/* Check failed */
regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xeac = 0x%x\n", regeac));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
regeb4 = rtl_get_bbreg(hw, 0xeb4, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xeb4 = 0x%x\n", regeb4));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeb4 = 0x%x\n", regeb4);
regebc = rtl_get_bbreg(hw, 0xebc, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xebc = 0x%x\n", regebc));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xebc = 0x%x\n", regebc);
regec4 = rtl_get_bbreg(hw, 0xec4, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xec4 = 0x%x\n", regec4));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xec4 = 0x%x\n", regec4);
regecc = rtl_get_bbreg(hw, 0xecc, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xecc = 0x%x\n", regecc));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xecc = 0x%x\n", regecc);
if (!(regeac & BIT(31)) && (((regeb4 & 0x03FF0000) >> 16) != 0x142) &&
(((regebc & 0x03FF0000) >> 16) != 0x42))
result |= 0x01;
@@ -1823,7 +1710,7 @@ static u8 _rtl92d_phy_pathb_iqk(struct ieee80211_hw *hw)
(((regecc & 0x03FF0000) >> 16) != 0x36))
result |= 0x02;
else
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path B Rx IQK fail!!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B Rx IQK fail!!\n");
return result;
}
@@ -1837,9 +1724,9 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
u8 i;
u8 retrycount = 2;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path B IQK!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B IQK!\n");
/* path-A IQK setting */
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path-A IQK setting!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n");
rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x18008c1f);
rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x18008c1f);
rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82110000);
@@ -1852,7 +1739,7 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x68160960);
/* LO calibration setting */
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("LO calibration setting!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911);
/* path-B PA on */
@@ -1862,26 +1749,26 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
for (i = 0; i < retrycount; i++) {
/* One shot, path B LOK & IQK */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("One shot, path A LOK & IQK!\n"));
+ "One shot, path A LOK & IQK!\n");
rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xfa000000);
rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000);
/* delay x ms */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Delay %d ms for One shot, path B LOK & IQK.\n", 10));
+ "Delay %d ms for One shot, path B LOK & IQK.\n", 10);
mdelay(IQK_DELAY_TIME * 10);
/* Check failed */
regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xeac = 0x%x\n", regeac));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
regeb4 = rtl_get_bbreg(hw, 0xeb4, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xeb4 = 0x%x\n", regeb4));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeb4 = 0x%x\n", regeb4);
regebc = rtl_get_bbreg(hw, 0xebc, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xebc = 0x%x\n", regebc));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xebc = 0x%x\n", regebc);
regec4 = rtl_get_bbreg(hw, 0xec4, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xec4 = 0x%x\n", regec4));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xec4 = 0x%x\n", regec4);
regecc = rtl_get_bbreg(hw, 0xecc, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xecc = 0x%x\n", regecc));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xecc = 0x%x\n", regecc);
if (!(regeac & BIT(31)) &&
(((regeb4 & 0x03FF0000) >> 16) != 0x142))
result |= 0x01;
@@ -1893,7 +1780,7 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
break;
} else {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Path B Rx IQK fail!!\n"));
+ "Path B Rx IQK fail!!\n");
}
}
@@ -1912,7 +1799,7 @@ static void _rtl92d_phy_save_adda_registers(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 i;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Save ADDA parameters.\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save ADDA parameters.\n");
for (i = 0; i < regnum; i++)
adda_backup[i] = rtl_get_bbreg(hw, adda_reg[i], BMASKDWORD);
}
@@ -1923,7 +1810,7 @@ static void _rtl92d_phy_save_mac_registers(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 i;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Save MAC parameters.\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save MAC parameters.\n");
for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
@@ -1937,7 +1824,7 @@ static void _rtl92d_phy_reload_adda_registers(struct ieee80211_hw *hw,
u32 i;
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Reload ADDA power saving parameters !\n"));
+ "Reload ADDA power saving parameters !\n");
for (i = 0; i < regnum; i++)
rtl_set_bbreg(hw, adda_reg[i], BMASKDWORD, adda_backup[i]);
}
@@ -1948,7 +1835,7 @@ static void _rtl92d_phy_reload_mac_registers(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 i;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Reload MAC parameters !\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Reload MAC parameters !\n");
for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
rtl_write_byte(rtlpriv, macreg[i], macbackup[i]);
@@ -1961,7 +1848,7 @@ static void _rtl92d_phy_path_adda_on(struct ieee80211_hw *hw,
u32 pathon;
u32 i;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("ADDA ON.\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "ADDA ON.\n");
pathon = patha_on ? 0x04db25a4 : 0x0b1b25a4;
if (patha_on)
pathon = rtlpriv->rtlhal.interfaceindex == 0 ?
@@ -1976,7 +1863,7 @@ static void _rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 i;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("MAC settings for Calibration.\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "MAC settings for Calibration.\n");
rtl_write_byte(rtlpriv, macreg[0], 0x3F);
for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
@@ -1988,7 +1875,7 @@ static void _rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw,
static void _rtl92d_phy_patha_standby(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path-A standby mode!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A standby mode!\n");
rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x0);
rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, BMASKDWORD, 0x00010000);
@@ -2001,7 +1888,7 @@ static void _rtl92d_phy_pimode_switch(struct ieee80211_hw *hw, bool pi_mode)
u32 mode;
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("BB Switch to %s mode!\n", (pi_mode ? "PI" : "SI")));
+ "BB Switch to %s mode!\n", pi_mode ? "PI" : "SI");
mode = pi_mode ? 0x01000100 : 0x01000000;
rtl_set_bbreg(hw, 0x820, BMASKDWORD, mode);
rtl_set_bbreg(hw, 0x828, BMASKDWORD, mode);
@@ -2033,12 +1920,12 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
const u32 retrycount = 2;
u32 bbvalue;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("IQK for 2.4G :Start!!!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK for 2.4G :Start!!!\n");
if (t == 0) {
bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("==>0x%08x\n", bbvalue));
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("IQ Calibration for %s\n",
- (is2t ? "2T2R" : "1T1R")));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "==>0x%08x\n", bbvalue);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n",
+ is2t ? "2T2R" : "1T1R");
/* Save ADDA parameters, turn Path A ADDA on */
_rtl92d_phy_save_adda_registers(hw, adda_reg,
@@ -2076,7 +1963,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
if (is2t)
rtl_set_bbreg(hw, 0xb6c, BMASKDWORD, 0x0f600000);
/* IQ calibration setting */
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("IQK setting!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n");
rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000);
rtl_set_bbreg(hw, 0xe40, BMASKDWORD, 0x01007c00);
rtl_set_bbreg(hw, 0xe44, BMASKDWORD, 0x01004800);
@@ -2084,7 +1971,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
patha_ok = _rtl92d_phy_patha_iqk(hw, is2t);
if (patha_ok == 0x03) {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Path A IQK Success!!\n"));
+ "Path A IQK Success!!\n");
result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
0x3FF0000) >> 16;
result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
@@ -2097,7 +1984,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
} else if (i == (retrycount - 1) && patha_ok == 0x01) {
/* Tx IQK OK */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Path A IQK Only Tx Success!!\n"));
+ "Path A IQK Only Tx Success!!\n");
result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
0x3FF0000) >> 16;
@@ -2106,7 +1993,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
}
}
if (0x00 == patha_ok)
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path A IQK failed!!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK failed!!\n");
if (is2t) {
_rtl92d_phy_patha_standby(hw);
/* Turn Path B ADDA on */
@@ -2115,7 +2002,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
pathb_ok = _rtl92d_phy_pathb_iqk(hw);
if (pathb_ok == 0x03) {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Path B IQK Success!!\n"));
+ "Path B IQK Success!!\n");
result[t][4] = (rtl_get_bbreg(hw, 0xeb4,
BMASKDWORD) & 0x3FF0000) >> 16;
result[t][5] = (rtl_get_bbreg(hw, 0xebc,
@@ -2128,7 +2015,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
} else if (i == (retrycount - 1) && pathb_ok == 0x01) {
/* Tx IQK OK */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Path B Only Tx IQK Success!!\n"));
+ "Path B Only Tx IQK Success!!\n");
result[t][4] = (rtl_get_bbreg(hw, 0xeb4,
BMASKDWORD) & 0x3FF0000) >> 16;
result[t][5] = (rtl_get_bbreg(hw, 0xebc,
@@ -2137,12 +2024,12 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
}
if (0x00 == pathb_ok)
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Path B IQK failed!!\n"));
+ "Path B IQK failed!!\n");
}
/* Back to BB mode, load original value */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("IQK:Back to BB mode, load original value!\n"));
+ "IQK:Back to BB mode, load original value!\n");
rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0);
if (t != 0) {
@@ -2167,7 +2054,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x01008c00);
rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x01008c00);
}
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("<==\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "<==\n");
}
static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
@@ -2199,13 +2086,13 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
/* Note: IQ calibration must be performed after loading
* PHY_REG.txt , and radio_a, radio_b.txt */
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("IQK for 5G NORMAL:Start!!!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK for 5G NORMAL:Start!!!\n");
mdelay(IQK_DELAY_TIME * 20);
if (t == 0) {
bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, BMASKDWORD);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("==>0x%08x\n", bbvalue));
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("IQ Calibration for %s\n",
- (is2t ? "2T2R" : "1T1R")));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "==>0x%08x\n", bbvalue);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n",
+ is2t ? "2T2R" : "1T1R");
/* Save ADDA parameters, turn Path A ADDA on */
_rtl92d_phy_save_adda_registers(hw, adda_reg,
rtlphy->adda_backup,
@@ -2242,13 +2129,13 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
if (is2t)
rtl_set_bbreg(hw, 0xb6c, BMASKDWORD, 0x0f600000);
/* IQ calibration setting */
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("IQK setting!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n");
rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000);
rtl_set_bbreg(hw, 0xe40, BMASKDWORD, 0x10007c00);
rtl_set_bbreg(hw, 0xe44, BMASKDWORD, 0x01004800);
patha_ok = _rtl92d_phy_patha_iqk_5g_normal(hw, is2t);
if (patha_ok == 0x03) {
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path A IQK Success!!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK Success!!\n");
result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
0x3FF0000) >> 16;
result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
@@ -2259,14 +2146,14 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
0x3FF0000) >> 16;
} else if (patha_ok == 0x01) { /* Tx IQK OK */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Path A IQK Only Tx Success!!\n"));
+ "Path A IQK Only Tx Success!!\n");
result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
0x3FF0000) >> 16;
result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
0x3FF0000) >> 16;
} else {
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path A IQK Fail!!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK Fail!!\n");
}
if (is2t) {
/* _rtl92d_phy_patha_standby(hw); */
@@ -2275,7 +2162,7 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
pathb_ok = _rtl92d_phy_pathb_iqk_5g_normal(hw);
if (pathb_ok == 0x03) {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Path B IQK Success!!\n"));
+ "Path B IQK Success!!\n");
result[t][4] = (rtl_get_bbreg(hw, 0xeb4, BMASKDWORD) &
0x3FF0000) >> 16;
result[t][5] = (rtl_get_bbreg(hw, 0xebc, BMASKDWORD) &
@@ -2286,20 +2173,20 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
0x3FF0000) >> 16;
} else if (pathb_ok == 0x01) { /* Tx IQK OK */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Path B Only Tx IQK Success!!\n"));
+ "Path B Only Tx IQK Success!!\n");
result[t][4] = (rtl_get_bbreg(hw, 0xeb4, BMASKDWORD) &
0x3FF0000) >> 16;
result[t][5] = (rtl_get_bbreg(hw, 0xebc, BMASKDWORD) &
0x3FF0000) >> 16;
} else {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Path B IQK failed!!\n"));
+ "Path B IQK failed!!\n");
}
}
/* Back to BB mode, load original value */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("IQK:Back to BB mode, load original value!\n"));
+ "IQK:Back to BB mode, load original value!\n");
rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0);
if (t != 0) {
if (is2t)
@@ -2321,7 +2208,7 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
rtlphy->adda_backup,
IQK_ADDA_REG_NUM);
}
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("<==\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "<==\n");
}
static bool _rtl92d_phy_simularity_compare(struct ieee80211_hw *hw,
@@ -2395,8 +2282,7 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
rtlhal->macphymode == DUALMAC_DUALPHY;
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("Path A IQ Calibration %s !\n",
- (iqk_ok) ? "Success" : "Failed"));
+ "Path A IQ Calibration %s !\n", iqk_ok ? "Success" : "Failed");
if (final_candidate == 0xFF) {
return;
} else if (iqk_ok) {
@@ -2406,8 +2292,9 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
if ((val_x & 0x00000200) != 0)
val_x = val_x | 0xFFFFFC00;
tx0_a = (val_x * oldval_0) >> 8;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("X = 0x%x, tx0_a = 0x%x,"
- " oldval_0 0x%x\n", val_x, tx0_a, oldval_0));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "X = 0x%x, tx0_a = 0x%x, oldval_0 0x%x\n",
+ val_x, tx0_a, oldval_0);
rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, 0x3FF, tx0_a);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
((val_x * oldval_0 >> 7) & 0x1));
@@ -2419,8 +2306,9 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
rtlhal->current_bandtype == BAND_ON_5G)
val_y += 3;
tx0_c = (val_y * oldval_0) >> 8;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Y = 0x%lx, tx0_c = 0x%lx\n",
- val_y, tx0_c));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Y = 0x%lx, tx0_c = 0x%lx\n",
+ val_y, tx0_c);
rtl_set_bbreg(hw, ROFDM0_XCTxAFE, 0xF0000000,
((tx0_c & 0x3C0) >> 6));
rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, 0x003F0000,
@@ -2428,11 +2316,11 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
if (is2t)
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(26),
((val_y * oldval_0 >> 7) & 0x1));
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xC80 = 0x%x\n",
- rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
- BMASKDWORD)));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xC80 = 0x%x\n",
+ rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
+ BMASKDWORD));
if (txonly) {
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("only Tx OK\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "only Tx OK\n");
return;
}
reg = result[final_candidate][2];
@@ -2452,8 +2340,8 @@ static void _rtl92d_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw,
u32 oldval_1, val_x, tx1_a, reg;
long val_y, tx1_c;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path B IQ Calibration %s !\n",
- (iqk_ok) ? "Success" : "Failed"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B IQ Calibration %s !\n",
+ iqk_ok ? "Success" : "Failed");
if (final_candidate == 0xFF) {
return;
} else if (iqk_ok) {
@@ -2463,8 +2351,8 @@ static void _rtl92d_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw,
if ((val_x & 0x00000200) != 0)
val_x = val_x | 0xFFFFFC00;
tx1_a = (val_x * oldval_1) >> 8;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("X = 0x%x, tx1_a = 0x%x\n",
- val_x, tx1_a));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "X = 0x%x, tx1_a = 0x%x\n",
+ val_x, tx1_a);
rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, 0x3FF, tx1_a);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28),
((val_x * oldval_1 >> 7) & 0x1));
@@ -2474,8 +2362,8 @@ static void _rtl92d_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw,
if (rtlhal->current_bandtype == BAND_ON_5G)
val_y += 3;
tx1_c = (val_y * oldval_1) >> 8;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Y = 0x%lx, tx1_c = 0x%lx\n",
- val_y, tx1_c));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Y = 0x%lx, tx1_c = 0x%lx\n",
+ val_y, tx1_c);
rtl_set_bbreg(hw, ROFDM0_XDTxAFE, 0xF0000000,
((tx1_c & 0x3C0) >> 6));
rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, 0x003F0000,
@@ -2507,7 +2395,7 @@ void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw)
unsigned long flag = 0;
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("IQK:Start!!!channel %d\n", rtlphy->current_channel));
+ "IQK:Start!!!channel %d\n", rtlphy->current_channel);
for (i = 0; i < 8; i++) {
result[0][i] = 0;
result[1][i] = 0;
@@ -2521,7 +2409,7 @@ void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw)
is23simular = false;
is13simular = false;
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("IQK !!!currentband %d\n", rtlhal->current_bandtype));
+ "IQK !!!currentband %d\n", rtlhal->current_bandtype);
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
for (i = 0; i < 3; i++) {
if (rtlhal->current_bandtype == BAND_ON_5G) {
@@ -2573,10 +2461,9 @@ void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw)
regec4 = result[i][6];
regecc = result[i][7];
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("IQK: rege94=%lx rege9c=%lx regea4=%lx regeac=%lx "
- "regeb4=%lx regebc=%lx regec4=%lx regecc=%lx\n ",
+ "IQK: rege94=%lx rege9c=%lx regea4=%lx regeac=%lx regeb4=%lx regebc=%lx regec4=%lx regecc=%lx\n",
rege94, rege9c, regea4, regeac, regeb4, regebc, regec4,
- regecc));
+ regecc);
}
if (final_candidate != 0xff) {
rtlphy->reg_e94 = rege94 = result[final_candidate][0];
@@ -2588,12 +2475,11 @@ void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw)
regec4 = result[final_candidate][6];
regecc = result[final_candidate][7];
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("IQK: final_candidate is %x\n", final_candidate));
+ "IQK: final_candidate is %x\n", final_candidate);
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("IQK: rege94=%lx rege9c=%lx regea4=%lx regeac=%lx "
- "regeb4=%lx regebc=%lx regec4=%lx regecc=%lx\n ",
+ "IQK: rege94=%lx rege9c=%lx regea4=%lx regeac=%lx regeb4=%lx regebc=%lx regec4=%lx regecc=%lx\n",
rege94, rege9c, regea4, regeac, regeb4, regebc, regec4,
- regecc));
+ regecc);
patha_ok = pathb_ok = true;
} else {
rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100; /* X default value */
@@ -2618,7 +2504,7 @@ void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw)
true;
RT_TRACE(rtlpriv, COMP_SCAN | COMP_MLME, DBG_LOUD,
- ("\nIQK OK indexforchannel %d.\n", indexforchannel));
+ "IQK OK indexforchannel %d\n", indexforchannel);
}
}
@@ -2629,17 +2515,17 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
u8 indexforchannel;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("channel %d\n", channel));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "channel %d\n", channel);
/*------Do IQK for normal chip and test chip 5G band------- */
indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("indexforchannel %d done %d\n", indexforchannel,
- rtlphy->iqk_matrix_regsetting[indexforchannel].iqk_done));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "indexforchannel %d done %d\n",
+ indexforchannel,
+ rtlphy->iqk_matrix_regsetting[indexforchannel].iqk_done);
if (0 && !rtlphy->iqk_matrix_regsetting[indexforchannel].iqk_done &&
rtlphy->need_iqk) {
/* Re Do IQK. */
RT_TRACE(rtlpriv, COMP_SCAN | COMP_INIT, DBG_LOUD,
- ("Do IQK Matrix reg for channel:%d....\n", channel));
+ "Do IQK Matrix reg for channel:%d....\n", channel);
rtl92d_phy_iq_calibrate(hw);
} else {
/* Just load the value. */
@@ -2647,8 +2533,8 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
if (((!rtlhal->load_imrandiqk_setting_for2g) &&
indexforchannel == 0) || indexforchannel > 0) {
RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- ("Just Read IQK Matrix reg for channel:%d"
- "....\n", channel));
+ "Just Read IQK Matrix reg for channel:%d....\n",
+ channel);
if ((rtlphy->iqk_matrix_regsetting[indexforchannel].
value[0] != NULL)
/*&&(regea4 != 0) */)
@@ -2672,7 +2558,7 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
}
}
rtlphy->need_iqk = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("<====\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
static u32 _rtl92d_phy_get_abs(u32 val1, u32 val2)
@@ -2727,8 +2613,8 @@ static void _rtl92d_phy_calc_curvindex(struct ieee80211_hw *hw,
}
}
smallest_abs_val = 0xffffffff;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("curveindex[%d] = %x\n", i,
- curveindex[i]));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "curveindex[%d] = %x\n",
+ i, curveindex[i]);
}
}
@@ -2743,14 +2629,14 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw,
u32 u4tmp = 0, u4regvalue = 0;
bool bneed_powerdown_radio = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("path %d\n", erfpath));
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("band type = %d\n",
- rtlpriv->rtlhal.current_bandtype));
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("channel = %d\n", channel));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "path %d\n", erfpath);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "band type = %d\n",
+ rtlpriv->rtlhal.current_bandtype);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "channel = %d\n", channel);
if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) {/* Path-A for 5G */
u4tmp = curveindex_5g[channel-1];
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("ver 1 set RF-A, 5G, 0x28 = 0x%ulx !!\n", u4tmp));
+ "ver 1 set RF-A, 5G, 0x28 = 0x%ulx !!\n", u4tmp);
if (rtlpriv->rtlhal.macphymode == DUALMAC_DUALPHY &&
rtlpriv->rtlhal.interfaceindex == 1) {
bneed_powerdown_radio =
@@ -2769,7 +2655,7 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw,
} else if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) {
u4tmp = curveindex_2g[channel-1];
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("ver 3 set RF-B, 2G, 0x28 = 0x%ulx !!\n", u4tmp));
+ "ver 3 set RF-B, 2G, 0x28 = 0x%ulx !!\n", u4tmp);
if (rtlpriv->rtlhal.macphymode == DUALMAC_DUALPHY &&
rtlpriv->rtlhal.interfaceindex == 0) {
bneed_powerdown_radio =
@@ -2781,14 +2667,14 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw,
}
rtl_set_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800, u4tmp);
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("ver 3 set RF-B, 2G, 0x28 = 0x%ulx !!\n",
- rtl_get_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800)));
+ "ver 3 set RF-B, 2G, 0x28 = 0x%ulx !!\n",
+ rtl_get_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800));
if (bneed_powerdown_radio)
_rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
if (rtlpriv->rtlhal.during_mac0init_radiob)
rtl92d_phy_powerdown_anotherphy(hw, true);
}
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("<====\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
@@ -2836,20 +2722,20 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
RF_SYN_G6, BRFREGOFFSETMASK);
}
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("PHY_LCK finish delay for %d ms=2\n", timecount));
+ "PHY_LCK finish delay for %d ms=2\n", timecount);
u4tmp = rtl_get_rfreg(hw, index, RF_SYN_G4, BRFREGOFFSETMASK);
if (index == 0 && rtlhal->interfaceindex == 0) {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("path-A / 5G LCK\n"));
+ "path-A / 5G LCK\n");
} else {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("path-B / 2.4G LCK\n"));
+ "path-B / 2.4G LCK\n");
}
memset(&curvecount_val[0], 0, CV_CURVE_CNT * 2);
/* Set LC calibration off */
rtl_set_rfreg(hw, (enum radio_path)index, RF_CHNLBW,
0x08000, 0x0);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("set RF 0x18[15] = 0\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "set RF 0x18[15] = 0\n");
/* save Curve-counting number */
for (i = 0; i < CV_CURVE_CNT; i++) {
u32 readval = 0, readval2 = 0;
@@ -2899,7 +2785,7 @@ static void _rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("cosa PHY_LCK ver=2\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "cosa PHY_LCK ver=2\n");
_rtl92d_phy_lc_calibrate_sw(hw, is2t);
}
@@ -2917,8 +2803,8 @@ void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw)
rtlphy->lck_inprogress = true;
RTPRINT(rtlpriv, FINIT, INIT_IQK,
- ("LCK:Start!!! currentband %x delay %d ms\n",
- rtlhal->current_bandtype, timecount));
+ "LCK:Start!!! currentband %x delay %d ms\n",
+ rtlhal->current_bandtype, timecount);
if (IS_92D_SINGLEPHY(rtlhal->version)) {
_rtl92d_phy_lc_calibrate(hw, true);
} else {
@@ -2926,7 +2812,7 @@ void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw)
_rtl92d_phy_lc_calibrate(hw, false);
}
rtlphy->lck_inprogress = false;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, ("LCK:Finish!!!\n"));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Finish!!!\n");
}
void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
@@ -2941,7 +2827,7 @@ static bool _rtl92d_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
struct swchnlcmd *pcmd;
if (cmdtable == NULL) {
- RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
+ RT_ASSERT(false, "cmdtable cannot be NULL\n");
return false;
}
if (cmdtableidx >= cmdtablesz)
@@ -2962,10 +2848,10 @@ void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw)
u8 i;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("settings regs %d default regs %d\n",
- (int)(sizeof(rtlphy->iqk_matrix_regsetting) /
- sizeof(struct iqk_matrix_regs)),
- IQK_MATRIX_REG_NUM));
+ "settings regs %d default regs %d\n",
+ (int)(sizeof(rtlphy->iqk_matrix_regsetting) /
+ sizeof(struct iqk_matrix_regs)),
+ IQK_MATRIX_REG_NUM);
/* 0xe94, 0xe9c, 0xea4, 0xeac, 0xeb4, 0xebc, 0xec4, 0xecc */
for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
rtlphy->iqk_matrix_regsetting[i].value[0][0] = 0x100;
@@ -3084,7 +2970,7 @@ static bool _rtl92d_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
break;
@@ -3111,7 +2997,7 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- ("sw_chnl_inprogress false driver sleep or unload\n"));
+ "sw_chnl_inprogress false driver sleep or unload\n");
return 0;
}
while (rtlphy->lck_inprogress && timecount < timeout) {
@@ -3133,19 +3019,18 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
* 5G and 2.4G band. */
if (channel <= 14)
return 0;
- RT_ASSERT((channel > 14), ("5G but channel<=14"));
+ RT_ASSERT((channel > 14), "5G but channel<=14\n");
break;
case BAND_ON_2_4G:
/* Get first channel error when change between
* 5G and 2.4G band. */
if (channel > 14)
return 0;
- RT_ASSERT((channel <= 14), ("2G but channel>14"));
+ RT_ASSERT((channel <= 14), "2G but channel>14\n");
break;
default:
- RT_ASSERT(false,
- ("Invalid WirelessMode(%#x)!!\n",
- rtlpriv->mac80211.mode));
+ RT_ASSERT(false, "Invalid WirelessMode(%#x)!!\n",
+ rtlpriv->mac80211.mode);
break;
}
rtlphy->sw_chnl_inprogress = true;
@@ -3154,7 +3039,7 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
rtlphy->sw_chnl_stage = 0;
rtlphy->sw_chnl_step = 0;
RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- ("switch to channel%d\n", rtlphy->current_channel));
+ "switch to channel%d\n", rtlphy->current_channel);
do {
if (!rtlphy->sw_chnl_inprogress)
@@ -3171,7 +3056,7 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
}
break;
} while (true);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
rtlphy->sw_chnl_inprogress = false;
return 1;
}
@@ -3182,8 +3067,8 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- ("--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress));
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
de_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1;
@@ -3197,12 +3082,12 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- ("<---(%#x)\n", rtlphy->current_io_type));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "<---(%#x)\n",
+ rtlphy->current_io_type);
}
bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
@@ -3212,23 +3097,23 @@ bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
bool postprocessing = false;
RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress));
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
do {
switch (iotype) {
case IO_CMD_RESUME_DM_BY_SCAN:
RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- ("[IO CMD] Resume DM after scan.\n"));
+ "[IO CMD] Resume DM after scan\n");
postprocessing = true;
break;
case IO_CMD_PAUSE_DM_BY_SCAN:
RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- ("[IO CMD] Pause DM before scan.\n"));
+ "[IO CMD] Pause DM before scan\n");
postprocessing = true;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
} while (false);
@@ -3239,7 +3124,7 @@ bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
return false;
}
rtl92d_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "<--IO Type(%#x)\n", iotype);
return true;
}
@@ -3297,7 +3182,7 @@ static void _rtl92d_phy_set_rfsleep(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("Fail !!! Switch RF timeout.\n"));
+ "Fail !!! Switch RF timeout\n");
return;
}
/* e. For PCIE: SYS_FUNC_EN 0x02[7:0] = 0xE2 reset BB TRX function */
@@ -3332,20 +3217,18 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
do {
InitializeCount++;
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("IPS Set eRf nic enable\n"));
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
- } while ((rtstatus != true) &&
- (InitializeCount < 10));
+ } while (!rtstatus && (InitializeCount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- ("awake, sleeped:%d ms state_"
- "inap:%x\n",
+ "awake, sleeped:%d ms state_inap:%x\n",
jiffies_to_msecs(jiffies -
- ppsc->last_sleep_jiffies),
- rtlpriv->psc.state_inap));
+ ppsc->last_sleep_jiffies),
+ rtlpriv->psc.state_inap);
ppsc->last_awake_jiffies = jiffies;
_rtl92d_phy_set_rfon(hw);
}
@@ -3360,7 +3243,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
case ERFOFF:
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("IPS Set eRf nic disable\n"));
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -3385,41 +3268,40 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
continue;
} else if (rtlpci->pdev->current_state != PCI_D0) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("eRf Off/Sleep: %d times TcbBusyQueu"
- "e[%d] !=0 but lower power state!\n",
- (i + 1), queue_id));
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] !=0 but lower power state!\n",
+ i + 1, queue_id);
break;
} else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("eRf Off/Sleep: %d times TcbBusyQueu"
- "e[%d] =%d "
- "before doze!\n", (i + 1), queue_id,
- skb_queue_len(&ring->queue)));
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ i + 1, queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("\nERFOFF: %d times TcbBusyQueue[%d] "
- "= %d !\n",
- MAX_DOZE_WAITING_TIMES_9x, queue_id,
- skb_queue_len(&ring->queue)));
+ "ERFOFF: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x, queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- ("Set rfsleep awaked:%d ms\n",
- jiffies_to_msecs(jiffies - ppsc->last_awake_jiffies)));
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, ("sleep awaked:%d ms "
- "state_inap:%x\n", jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies), rtlpriv->psc.state_inap));
+ "Set rfsleep awaked:%d ms\n",
+ jiffies_to_msecs(jiffies - ppsc->last_awake_jiffies));
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
+ "sleep awaked:%d ms state_inap:%x\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies),
+ rtlpriv->psc.state_inap);
ppsc->last_sleep_jiffies = jiffies;
_rtl92d_phy_set_rfsleep(hw);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
bresult = false;
break;
}
@@ -3437,17 +3319,17 @@ void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw)
switch (rtlhal->macphymode) {
case DUALMAC_DUALPHY:
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("MacPhyMode: DUALMAC_DUALPHY\n"));
+ "MacPhyMode: DUALMAC_DUALPHY\n");
rtl_write_byte(rtlpriv, offset, 0xF3);
break;
case SINGLEMAC_SINGLEPHY:
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("MacPhyMode: SINGLEMAC_SINGLEPHY\n"));
+ "MacPhyMode: SINGLEMAC_SINGLEPHY\n");
rtl_write_byte(rtlpriv, offset, 0xF4);
break;
case DUALMAC_SINGLEPHY:
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("MacPhyMode: DUALMAC_SINGLEPHY\n"));
+ "MacPhyMode: DUALMAC_SINGLEPHY\n");
rtl_write_byte(rtlpriv, offset, 0xF1);
break;
}
@@ -3578,7 +3460,7 @@ void rtl92d_phy_set_poweron(struct ieee80211_hw *hw)
}
}
if (i == 200)
- RT_ASSERT(false, ("Another mac power off over time\n"));
+ RT_ASSERT(false, "Another mac power off over time\n");
}
}
@@ -3615,7 +3497,7 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 rfpath, i;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("==>\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "==>\n");
/* r_select_5G for path_A/B 0 for 2.4G, 1 for 5G */
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
/* r_select_5G for path_A/B,0x878 */
@@ -3764,7 +3646,7 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
} else {
rtl92d_phy_enable_anotherphy(hw, false);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("MAC1 use DBI to update 0x888"));
+ "MAC1 use DBI to update 0x888\n");
/* 0x888 */
rtl92de_write_dword_dbi(hw, RFPGA0_ADDALLOCKEN,
rtl92de_read_dword_dbi(hw,
@@ -3789,9 +3671,9 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
BRFREGOFFSETMASK);
}
for (i = 0; i < 2; i++)
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("RF 0x18 = 0x%x\n",
- rtlphy->rfreg_chnlval[i]));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("<==\n"));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[i]);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "<==\n");
}
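The phy.c hunks above (and the rf.c, sw.c and trx.c hunks that follow) apply one mechanical change: RTPRINT, RT_TRACE and RT_ASSERT now take the format string and its arguments directly, so the extra parentheses that used to wrap the whole printk-style argument group are dropped. A minimal sketch of the two calling conventions, assuming variadic macro definitions along these lines (hypothetical names; the real definitions live in the rtlwifi debug headers and are not part of this diff):

	/* Old convention: the entire printk argument list is a single macro
	 * parameter, so every call site needs double parentheses. */
	#define RTPRINT_OLD(rtlpriv, comp, level, printk_args) \
		do { printk printk_args; } while (0)

	RTPRINT_OLD(rtlpriv, FINIT, INIT_IQK, ("IQK setting!\n"));

	/* New convention: a true variadic macro, called like printk itself. */
	#define RTPRINT_NEW(rtlpriv, comp, level, fmt, ...) \
		printk(fmt, ##__VA_ARGS__)

	RTPRINT_NEW(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n");

Besides dropping the parentheses, several hunks also coalesce format strings that were split across source lines, which is where the long single-line messages such as the "IQK: rege94=..." print come from.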
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
index a52c824b41e3..f074952bf25c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
index 131acc306fcc..ebb1d5f5e7b5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -998,7 +998,6 @@
#define SCR_RXBCUSEDK BIT(7)
/* General definitions */
-#define MAC_ADDR_LEN 6
#define LAST_ENTRY_OF_TX_PKT_BUFFER 255
#define LAST_ENTRY_OF_TX_PKT_BUFFER_DUAL_MAC 127
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
index db27cebaac2c..3066a7fb0b57 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -50,8 +50,8 @@ void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
BIT(11), 0x01);
RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- ("20M RF 0x18 = 0x%x\n",
- rtlphy->rfreg_chnlval[rfpath]));
+ "20M RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[rfpath]);
}
break;
@@ -62,13 +62,13 @@ void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(10) | BIT(11),
0x00);
RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- ("40M RF 0x18 = 0x%x\n",
- rtlphy->rfreg_chnlval[rfpath]));
+ "40M RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[rfpath]);
}
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("unknown bandwidth: %#X\n", bandwidth));
+ "unknown bandwidth: %#X\n", bandwidth);
break;
}
}
@@ -127,23 +127,23 @@ void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
tmpval = tx_agc[RF90_PATH_A] & 0xff;
rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, BMASKBYTE1, tmpval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
- RTXAGC_A_CCK1_MCS32));
+ "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_A_CCK1_MCS32);
tmpval = tx_agc[RF90_PATH_A] >> 8;
rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
- RTXAGC_B_CCK11_A_CCK2_11));
+ "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_B_CCK11_A_CCK2_11);
tmpval = tx_agc[RF90_PATH_B] >> 24;
rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, BMASKBYTE0, tmpval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
- RTXAGC_B_CCK11_A_CCK2_11));
+ "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_B_CCK11_A_CCK2_11);
tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
- RTXAGC_B_CCK1_55_MCS32));
+ "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_B_CCK1_55_MCS32);
}
static void _rtl92d_phy_get_power_base(struct ieee80211_hw *hw,
@@ -165,8 +165,8 @@ static void _rtl92d_phy_get_power_base(struct ieee80211_hw *hw,
(powerbase0 << 8) | powerbase0;
*(ofdmbase + i) = powerbase0;
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- (" [OFDM power base index rf(%c) = 0x%x]\n",
- ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)));
+ " [OFDM power base index rf(%c) = 0x%x]\n",
+ i == 0 ? 'A' : 'B', *(ofdmbase + i));
}
for (i = 0; i < 2; i++) {
@@ -179,8 +179,8 @@ static void _rtl92d_phy_get_power_base(struct ieee80211_hw *hw,
(powerbase1 << 8) | powerbase1;
*(mcsbase + i) = powerbase1;
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- (" [MCS power base index rf(%c) = 0x%x]\n",
- ((i == 0) ? 'A' : 'B'), *(mcsbase + i)));
+ " [MCS power base index rf(%c) = 0x%x]\n",
+ i == 0 ? 'A' : 'B', *(mcsbase + i));
}
}
@@ -232,9 +232,9 @@ static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
(rf ? 8 : 0)] + ((index < 2) ?
powerbase0[rf] :
powerbase1[rf]);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR, ("RTK better "
- "performance, writeval(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), writeval));
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "RTK better performance, writeval(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
break;
case 1:
if (rtlphy->pwrgroup_cnt == 1)
@@ -253,33 +253,31 @@ static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
powerbase0[rf] :
powerbase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("Realtek regulatory, "
- "20MHz, writeval(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'),
- writeval));
+ "Realtek regulatory, 20MHz, writeval(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
}
break;
case 2:
writeval = ((index < 2) ? powerbase0[rf] :
powerbase1[rf]);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR, ("Better regulatory, "
- "writeval(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), writeval));
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "Better regulatory, writeval(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
break;
case 3:
chnlgroup = 0;
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("customer's limit, 40MHz rf(%c) = "
- "0x%x\n", ((rf == 0) ? 'A' : 'B'),
+ "customer's limit, 40MHz rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B',
rtlefuse->pwrgroup_ht40[rf]
- [channel - 1]));
+ [channel - 1]);
} else {
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("customer's limit, 20MHz rf(%c) = "
- "0x%x\n", ((rf == 0) ? 'A' : 'B'),
+ "customer's limit, 20MHz rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B',
rtlefuse->pwrgroup_ht20[rf]
- [channel - 1]));
+ [channel - 1]);
}
for (i = 0; i < 4; i++) {
pwr_diff_limit[i] =
@@ -308,13 +306,13 @@ static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
(pwr_diff_limit[1] << 8) |
(pwr_diff_limit[0]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("Customer's limit rf(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), customer_limit));
+ "Customer's limit rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', customer_limit);
writeval = customer_limit + ((index < 2) ?
powerbase0[rf] : powerbase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("Customer, writeval rf(%c)= 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), writeval));
+ "Customer, writeval rf(%c)= 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
break;
default:
chnlgroup = 0;
@@ -323,9 +321,8 @@ static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
(rf ? 8 : 0)] + ((index < 2) ?
powerbase0[rf] : powerbase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("RTK better performance, writeval "
- "rf(%c) = 0x%x\n",
- ((rf == 0) ? 'A' : 'B'), writeval));
+ "RTK better performance, writeval rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
break;
}
*(p_outwriteval + rf) = writeval;
@@ -367,7 +364,7 @@ static void _rtl92d_write_ofdm_power_reg(struct ieee80211_hw *hw,
regoffset = regoffset_b[index];
rtl_set_bbreg(hw, regoffset, BMASKDWORD, writeval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- ("Set 0x%x = %08x\n", regoffset, writeval));
+ "Set 0x%x = %08x\n", regoffset, writeval);
if (((get_rf_type(rtlphy) == RF_2T2R) &&
(regoffset == RTXAGC_A_MCS15_MCS12 ||
regoffset == RTXAGC_B_MCS15_MCS12)) ||
@@ -423,11 +420,11 @@ bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0)
rtlhal->during_mac0init_radiob = false;
rtlhal->during_mac1init_radioa = false;
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("===>\n"));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "===>\n");
/* MAC0 Need PHY1 load radio_b.txt . Driver use DBI to write. */
u1btmp = rtl_read_byte(rtlpriv, mac_reg);
if (!(u1btmp & mac_on_bit)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("enable BB & RF\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable BB & RF\n");
/* Enable BB and RF power */
rtl92de_write_dword_dbi(hw, REG_SYS_ISO_CTRL,
rtl92de_read_dword_dbi(hw, REG_SYS_ISO_CTRL, direct) |
@@ -437,7 +434,7 @@ bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0)
* and radio_b.txt has been load. */
bresult = false;
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("<===\n"));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "<===\n");
return bresult;
}
@@ -453,17 +450,17 @@ void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0)
rtlhal->during_mac0init_radiob = false;
rtlhal->during_mac1init_radioa = false;
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("====>\n"));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
/* check MAC0 enable or not again now, if
* enabled, not power down radio A. */
u1btmp = rtl_read_byte(rtlpriv, mac_reg);
if (!(u1btmp & mac_on_bit)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("power down\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "power down\n");
/* power down RF radio A according to YuNan's advice. */
rtl92de_write_dword_dbi(hw, RFPGA0_XA_LSSIPARAMETER,
0x00000000, direct);
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("<====\n"));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
}
bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw)
@@ -604,9 +601,9 @@ bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw)
u4_regvalue);
break;
}
- if (rtstatus != true) {
+ if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Radio[%d] Fail!!", rfpath));
+ "Radio[%d] Fail!!", rfpath);
goto phy_rf_cfg_fail;
}
@@ -620,7 +617,7 @@ bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw)
rtl92d_phy_powerdown_anotherphy(hw, false);
else if (need_pwrdown_radiob)
rtl92d_phy_powerdown_anotherphy(hw, true);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("<---\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
return rtstatus;
phy_rf_cfg_fail:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
index 74b9cfc39a83..0fe1a48593e8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index 7911c9c87085..4898c502974d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,11 +27,6 @@
*
*****************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-
#include "../wifi.h"
#include "../core.h"
#include "../pci.h"
@@ -44,6 +39,8 @@
#include "trx.h"
#include "led.h"
+#include <linux/module.h>
+
static void rtl92d_init_aspm_vars(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -94,7 +91,6 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
u8 tid;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- const struct firmware *firmware;
static int header_print;
rtlpriv->dm.dm_initialgain_enable = true;
@@ -154,9 +150,9 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
if (!rtlpriv->psc.inactiveps)
- pr_info("rtl8192ce: Power Save off (module option)\n");
+ pr_info("Power Save off (module option)\n");
if (!rtlpriv->psc.fwctrl_lps)
- pr_info("rtl8192ce: FW Power Save off (module option)\n");
+ pr_info("FW Power Save off (module option)\n");
rtlpriv->psc.reg_fwctrl_lps = 3;
rtlpriv->psc.reg_max_lps_awakeintvl = 5;
/* for ASPM, you can close aspm through
@@ -170,41 +166,38 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
else if (rtlpriv->psc.reg_fwctrl_lps == 3)
rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
+ /* for early mode */
+ rtlpriv->rtlhal.earlymode_enable = true;
+ for (tid = 0; tid < 8; tid++)
+ skb_queue_head_init(&rtlpriv->mac80211.skb_waitq[tid]);
+
+ /* Only load firmware for first MAC */
+ if (header_print)
+ return 0;
+
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
if (!rtlpriv->rtlhal.pfirmware) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Can't alloc buffer for fw.\n"));
+ "Can't alloc buffer for fw\n");
return 1;
}
- if (!header_print) {
- pr_info("Driver for Realtek RTL8192DE WLAN interface\n");
- pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
- header_print++;
- }
+ rtlpriv->max_fw_size = 0x8000;
+ pr_info("Driver for Realtek RTL8192DE WLAN interface\n");
+ pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
+ header_print++;
+
/* request fw */
- err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
- rtlpriv->io.dev);
+ err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ rtlpriv->io.dev, GFP_KERNEL, hw,
+ rtl_fw_cb);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Failed to request firmware!\n"));
- return 1;
- }
- if (firmware->size > 0x8000) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Firmware is too big!\n"));
- release_firmware(firmware);
+ "Failed to request firmware!\n");
return 1;
}
- memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
- rtlpriv->rtlhal.fwsize = firmware->size;
- release_firmware(firmware);
- /* for early mode */
- rtlpriv->rtlhal.earlymode_enable = true;
- for (tid = 0; tid < 8; tid++)
- skb_queue_head_init(&rtlpriv->mac80211.skb_waitq[tid]);
return 0;
}
@@ -424,7 +417,7 @@ static int __init rtl92de_module_init(void)
ret = pci_register_driver(&rtl92de_driver);
if (ret)
- RT_ASSERT(false, (": No device found\n"));
+ RT_ASSERT(false, "No device found\n");
return ret;
}
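The sw.c change above is more than formatting: rtl92d_init_sw_vars() now issues request_firmware_nowait() with the rtl_fw_cb completion routine instead of blocking in request_firmware(), records rtlpriv->max_fw_size, and lets only the first MAC (header_print still zero) allocate the 0x8000-byte firmware buffer. The completion callback itself is not part of this diff; the sketch below only illustrates what such a callback would typically do with the fields used here (pfirmware, fwsize, max_fw_size) and is not the driver's actual rtl_fw_cb:

	#include <linux/firmware.h>

	/* Illustrative completion callback for request_firmware_nowait();
	 * the real rtl_fw_cb lives in the shared rtlwifi core. */
	static void example_fw_cb(const struct firmware *firmware, void *context)
	{
		struct ieee80211_hw *hw = context;
		struct rtl_priv *rtlpriv = rtl_priv(hw);

		if (!firmware)		/* request failed or file missing */
			return;
		if (firmware->size > rtlpriv->max_fw_size) {
			release_firmware(firmware);
			return;
		}
		memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
		rtlpriv->rtlhal.fwsize = firmware->size;
		release_firmware(firmware);
	}

The second argument (1) passed to request_firmware_nowait() asks for a hotplug uevent in kernels of this vintage, and GFP_KERNEL is acceptable because the request is issued from the driver's probe-time init path rather than from atomic context.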
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.h b/drivers/net/wireless/rtlwifi/rtl8192de/sw.h
index c95e47de1346..0e6035b8fd86 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/table.c b/drivers/net/wireless/rtlwifi/rtl8192de/table.c
index bad7f9449ecf..8ea6f528dfa6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/table.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/table.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/table.h b/drivers/net/wireless/rtlwifi/rtl8192de/table.h
index 93f30ca62d8f..8b724a86117a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/table.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/table.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 3637c0c33525..a7f6126e2f86 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -602,8 +602,8 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD,
- ("Insert 8 byte.pTcb->EMPktNum:%d\n",
- ptcb_desc->empkt_num));
+ "Insert 8 byte.pTcb->EMPktNum:%d\n",
+ ptcb_desc->empkt_num);
_rtl92de_insert_emcontent(ptcb_desc,
(u8 *)(skb->data));
}
@@ -700,7 +700,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- ("Enable RDG function.\n"));
+ "Enable RDG function\n");
SET_TX_DESC_RDG_ENABLE(pdesc, 1);
SET_TX_DESC_HTC(pdesc, 1);
}
@@ -726,7 +726,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_PKT_ID(pdesc, 8);
}
SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, ("\n"));
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
@@ -776,7 +776,7 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
}
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C Tx Cmd Content\n", pdesc, TX_DESC_SIZE);
+ "H2C Tx Cmd Content", pdesc, TX_DESC_SIZE);
wmb();
SET_TX_DESC_OWN(pdesc, 1);
}
@@ -793,8 +793,8 @@ void rtl92de_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
break;
default:
- RT_ASSERT(false, ("ERR txdesc :%d"
- " not process\n", desc_name));
+ RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ desc_name);
break;
}
} else {
@@ -813,8 +813,8 @@ void rtl92de_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
SET_RX_DESC_EOR(pdesc, 1);
break;
default:
- RT_ASSERT(false, ("ERR rxdesc :%d "
- "not process\n", desc_name));
+ RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ desc_name);
break;
}
}
@@ -833,8 +833,8 @@ u32 rtl92de_get_desc(u8 *p_desc, bool istx, u8 desc_name)
ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc);
break;
default:
- RT_ASSERT(false, ("ERR txdesc :%d "
- "not process\n", desc_name));
+ RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ desc_name);
break;
}
} else {
@@ -847,8 +847,8 @@ u32 rtl92de_get_desc(u8 *p_desc, bool istx, u8 desc_name)
ret = GET_RX_DESC_PKT_LEN(pdesc);
break;
default:
- RT_ASSERT(false, ("ERR rxdesc :%d "
- "not process\n", desc_name));
+ RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ desc_name);
break;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
index 4d55d0b6816d..0dc736c2723b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
index c6c044816d39..d1b0a1e14971 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index 4203a8531ca0..fbabae17259e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -170,9 +170,9 @@ static void _rtl92s_dm_txpowertracking_callback_thermalmeter(
thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
- "eeprom_thermalmeter 0x%x\n", thermalvalue,
- rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter));
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermal meter 0x%x\n",
+ thermalvalue,
+ rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter);
if (thermalvalue) {
rtlpriv->dm.thermalvalue = thermalvalue;
@@ -282,11 +282,11 @@ static void _rtl92s_dm_refresh_rateadaptive_mask(struct ieee80211_hw *hw)
}
if (ra->pre_ratr_state != ra->ratr_state) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, ("RSSI = %ld "
- "RSSI_LEVEL = %d PreState = %d, CurState = %d\n",
- rtlpriv->dm.undecorated_smoothed_pwdb,
- ra->ratr_state,
- ra->pre_ratr_state, ra->ratr_state));
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI = %ld RSSI_LEVEL = %d PreState = %d, CurState = %d\n",
+ rtlpriv->dm.undecorated_smoothed_pwdb,
+ ra->ratr_state,
+ ra->pre_ratr_state, ra->ratr_state);
rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
ra->ratr_state);
@@ -586,7 +586,7 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- ("Not connected to any\n"));
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL;
@@ -599,22 +599,22 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
undecorated_smoothed_pwdb =
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("AP Client PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb));
+ "AP Client PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb);
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.undecorated_smoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("STA Default Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb));
+ "STA Default Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb);
}
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("AP Ext Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb));
+ "AP Ext Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb);
}
txpwr_threshold_lv2 = TX_POWER_NEAR_FIELD_THRESH_LVL2;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h b/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
index 9051a556acc4..e1b19a641765 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
index 3fda6b1dcf46..380e7d4b1ccf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -66,7 +66,7 @@ static bool _rtl92s_firmware_enable_cpu(struct ieee80211_hw *hw)
cpustatus = rtl_read_byte(rtlpriv, TCR);
if (cpustatus & IMEM_RDY) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("IMEM Ready after CPU has refilled.\n"));
+ "IMEM Ready after CPU has refilled\n");
break;
}
@@ -120,9 +120,8 @@ static u8 _rtl92s_firmware_header_map_rftype(struct ieee80211_hw *hw)
return 0x22;
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- ("Unknown RF type(%x)\n",
- rtlphy->rf_type));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown RF type(%x)\n",
+ rtlphy->rf_type);
break;
}
return 0x22;
@@ -177,7 +176,7 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
if (buffer_len >= MAX_FIRMWARE_CODE_SIZE) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Size over FIRMWARE_CODE_SIZE!\n"));
+ "Size over FIRMWARE_CODE_SIZE!\n");
return false;
}
@@ -231,8 +230,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
short pollingcnt = 1000;
bool rtstatus = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("LoadStaus(%d)\n",
- loadfw_status));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "LoadStaus(%d)\n", loadfw_status);
firmware->fwstatus = (enum fw_status)loadfw_status;
@@ -248,8 +247,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
if (!(cpustatus & IMEM_CHK_RPT) || (pollingcnt <= 0)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("FW_STATUS_LOAD_IMEM"
- " FAIL CPU, Status=%x\r\n", cpustatus));
+ "FW_STATUS_LOAD_IMEM FAIL CPU, Status=%x\n",
+ cpustatus);
goto status_check_fail;
}
break;
@@ -266,16 +265,16 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
if (!(cpustatus & EMEM_CHK_RPT) || (pollingcnt <= 0)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("FW_STATUS_LOAD_EMEM"
- " FAIL CPU, Status=%x\r\n", cpustatus));
+ "FW_STATUS_LOAD_EMEM FAIL CPU, Status=%x\n",
+ cpustatus);
goto status_check_fail;
}
/* Turn On CPU */
rtstatus = _rtl92s_firmware_enable_cpu(hw);
- if (rtstatus != true) {
+ if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Enable CPU fail!\n"));
+ "Enable CPU fail!\n");
goto status_check_fail;
}
break;
@@ -291,14 +290,14 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
if (!(cpustatus & DMEM_CODE_DONE) || (pollingcnt <= 0)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Polling DMEM code done"
- " fail ! cpustatus(%#x)\n", cpustatus));
+ "Polling DMEM code done fail ! cpustatus(%#x)\n",
+ cpustatus);
goto status_check_fail;
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("DMEM code download success,"
- " cpustatus(%#x)\n", cpustatus));
+ "DMEM code download success, cpustatus(%#x)\n",
+ cpustatus);
/* Prevent Delay too much and being scheduled out */
/* Polling Load Firmware ready */
@@ -311,14 +310,14 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
} while (pollingcnt--);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("Polling Load Firmware ready,"
- " cpustatus(%x)\n", cpustatus));
+ "Polling Load Firmware ready, cpustatus(%x)\n",
+ cpustatus);
if (((cpustatus & LOAD_FW_READY) != LOAD_FW_READY) ||
(pollingcnt <= 0)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Polling Load Firmware"
- " ready fail ! cpustatus(%x)\n", cpustatus));
+ "Polling Load Firmware ready fail ! cpustatus(%x)\n",
+ cpustatus);
goto status_check_fail;
}
@@ -332,7 +331,7 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
RCR_APP_ICV | RCR_APP_MIC));
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("Current RCR settings(%#x)\n", tmpu4b));
+ "Current RCR settings(%#x)\n", tmpu4b);
/* Set to normal mode. */
rtl_write_byte(rtlpriv, LBKMD_SEL, LBK_NORMAL);
@@ -340,14 +339,15 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
default:
RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- ("Unknown status check!\n"));
+ "Unknown status check!\n");
rtstatus = false;
break;
}
status_check_fail:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("loadfw_status(%d), "
- "rtstatus(%x)\n", loadfw_status, rtstatus));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "loadfw_status(%d), rtstatus(%x)\n",
+ loadfw_status, rtstatus);
return rtstatus;
}
@@ -364,7 +364,7 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
u8 fwstatus = FW_STATUS_INIT;
bool rtstatus = true;
- if (!rtlhal->pfirmware)
+ if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware)
return 1;
firmware = (struct rt_firmware *)rtlhal->pfirmware;
@@ -378,17 +378,17 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
firmware->firmwareversion = byte(pfwheader->version, 0);
firmware->pfwheader->fwpriv.hci_sel = 1;/* pcie */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("signature:%x, version:"
- "%x, size:%x,"
- "imemsize:%x, sram size:%x\n", pfwheader->signature,
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "signature:%x, version:%x, size:%x, imemsize:%x, sram size:%x\n",
+ pfwheader->signature,
pfwheader->version, pfwheader->dmem_size,
- pfwheader->img_imem_size, pfwheader->img_sram_size));
+ pfwheader->img_imem_size, pfwheader->img_sram_size);
/* 2. Retrieve IMEM image. */
if ((pfwheader->img_imem_size == 0) || (pfwheader->img_imem_size >
sizeof(firmware->fw_imem))) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("memory for data image is less than IMEM required\n"));
+ "memory for data image is less than IMEM required\n");
goto fail;
} else {
puc_mappedfile += fwhdr_size;
@@ -401,7 +401,7 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
/* 3. Retriecve EMEM image. */
if (pfwheader->img_sram_size > sizeof(firmware->fw_emem)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("memory for data image is less than EMEM required\n"));
+ "memory for data image is less than EMEM required\n");
goto fail;
} else {
puc_mappedfile += firmware->fw_imem_len;
@@ -436,7 +436,7 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Unexpected Download step!!\n"));
+ "Unexpected Download step!!\n");
goto fail;
break;
}
@@ -445,15 +445,15 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
rtstatus = _rtl92s_firmware_downloadcode(hw, puc_mappedfile,
ul_filelength);
- if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("fail!\n"));
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "fail!\n");
goto fail;
}
/* <3> Check whether load FW process is ready */
rtstatus = _rtl92s_firmware_checkready(hw, fwstatus);
- if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("fail!\n"));
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "fail!\n");
goto fail;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h b/drivers/net/wireless/rtlwifi/rtl8192se/fw.h
index 74cc503efe8a..b4afff626437 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -30,6 +30,7 @@
#define __REALTEK_FIRMWARE92S_H__
#define RTL8190_MAX_FIRMWARE_CODE_SIZE 64000
+#define RTL8190_MAX_RAW_FIRMWARE_CODE_SIZE 90000
#define RTL8190_CPU_START_OFFSET 0x80
/* Firmware Local buffer size. 64k */
#define MAX_FIRMWARE_CODE_SIZE 0xFF00
@@ -217,7 +218,7 @@ struct rt_firmware {
u8 fw_emem[RTL8190_MAX_FIRMWARE_CODE_SIZE];
u32 fw_imem_len;
u32 fw_emem_len;
- u8 sz_fw_tmpbuffer[164000];
+ u8 sz_fw_tmpbuffer[RTL8190_MAX_RAW_FIRMWARE_CODE_SIZE];
u32 sz_fw_tmpbufferlen;
u16 cmdpacket_fragthresold;
};
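
The fw.h change above replaces the magic 164000-byte scratch buffer with a named constant, and the download path in fw.c only copies an image after checking its declared size against the fixed destination array. A stand-alone sketch of that bounds check follows; the constant, structure and field names here are illustrative, not the driver's.

/*
 * Validate a length taken from an untrusted firmware header before
 * copying into a fixed-size buffer.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

#define TOY_MAX_FW_CODE_SIZE 64000

struct toy_fw_image {
	uint8_t code[TOY_MAX_FW_CODE_SIZE];
	uint32_t len;
};

static bool toy_copy_image(struct toy_fw_image *dst, const uint8_t *src,
			   uint32_t declared_len)
{
	/* Reject zero-length and oversized images before touching memory. */
	if (declared_len == 0 || declared_len > sizeof(dst->code))
		return false;

	memcpy(dst->code, src, declared_len);
	dst->len = declared_len;
	return true;
}

int main(void)
{
	static uint8_t blob[128] = { 0xde, 0xad };
	static struct toy_fw_image img;

	printf("copy ok: %d\n", toy_copy_image(&img, blob, sizeof(blob)));
	printf("oversized rejected: %d\n",
	       !toy_copy_image(&img, blob, TOY_MAX_FW_CODE_SIZE + 1));
	return 0;
}
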
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index c474486e3911..b141c35bf926 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,8 +27,6 @@
*
*****************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include "../wifi.h"
#include "../efuse.h"
#include "../base.h"
@@ -80,8 +78,8 @@ void rtl92se_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
default: {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not processed\n");
break;
}
}
@@ -140,7 +138,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 e_aci;
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("HW_VAR_SLOT_TIME %x\n", val[0]));
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, SLOT_TIME, val[0]);
@@ -185,8 +183,8 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*val = min_spacing_to_set;
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg));
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -201,8 +199,8 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
mac->min_space_cfg |= (density_to_set << 3);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg));
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -244,8 +242,8 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_byte(rtlpriv, AGGLEN_LMT_H, regtoset);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- ("Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset));
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
break;
}
@@ -282,8 +280,8 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("HW_VAR_ACM_CTRL acm set "
- "failed: eACI is %d\n", acm));
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -299,13 +297,13 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
}
RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- ("HW_VAR_ACM_CTRL Write 0x%X\n", acm_ctrl));
+ "HW_VAR_ACM_CTRL Write 0x%X\n", acm_ctrl);
rtl_write_byte(rtlpriv, AcmHwCtrl, acm_ctrl);
break;
}
@@ -404,7 +402,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
@@ -415,14 +413,14 @@ void rtl92se_enable_hw_security_config(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 sec_reg_value = 0x0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("PairwiseEncAlgorithm = %d "
- "GroupEncAlgorithm = %d\n",
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm));
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("not open hw encryption\n"));
+ "not open hw encryption\n");
return;
}
@@ -433,8 +431,8 @@ void rtl92se_enable_hw_security_config(struct ieee80211_hw *hw)
sec_reg_value |= SCR_RXUSEDK;
}
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, ("The SECR-value %x\n",
- sec_reg_value));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "The SECR-value %x\n",
+ sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
@@ -718,8 +716,8 @@ static void _rtl92se_macconfig_before_fwdownload(struct ieee80211_hw *hw)
if (pollingcnt <= 0) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Polling TXDMA_INIT_VALUE "
- "timeout!! Current TCR(%#x)\n", tmpu1b));
+ "Polling TXDMA_INIT_VALUE timeout!! Current TCR(%#x)\n",
+ tmpu1b);
tmpu1b = rtl_read_byte(rtlpriv, CMDR);
rtl_write_byte(rtlpriv, CMDR, tmpu1b & (~TXDMA_EN));
udelay(2);
@@ -870,10 +868,10 @@ static void _rtl92se_macconfig_after_fwdownload(struct ieee80211_hw *hw)
/* Change Program timing */
rtl_write_byte(rtlpriv, REG_EFUSE_CTRL + 3, 0x72);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("EFUSE CONFIG OK\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "EFUSE CONFIG OK\n");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("OK\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "OK\n");
}
@@ -951,12 +949,9 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
rtstatus = rtl92s_download_fw(hw);
if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("Failed to download FW. "
- "Init HW without FW now.., Please copy FW into"
- "/lib/firmware/rtlwifi\n"));
- rtlhal->fw_ready = false;
- } else {
- rtlhal->fw_ready = true;
+ "Failed to download FW. Init HW without FW now... "
+ "Please copy FW into /lib/firmware/rtlwifi\n");
+ return 1;
}
/* After FW download, we have to reset MAC register */
@@ -967,8 +962,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
rtlhal->fwcmd_ioparam = rtl_read_dword(rtlpriv, LBUS_ADDR_MASK);
/* 3. Initialize MAC/PHY Config by MACPHY_reg.txt */
- if (rtl92s_phy_mac_config(hw) != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("MAC Config failed\n"));
+ if (!rtl92s_phy_mac_config(hw)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "MAC Config failed\n");
return rtstatus;
}
@@ -977,8 +972,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, CMDR, 0x37FC);
/* 4. Initialize BB After MAC Config PHY_reg.txt, AGC_Tab.txt */
- if (rtl92s_phy_bb_config(hw) != true) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, ("BB Config failed\n"));
+ if (!rtl92s_phy_bb_config(hw)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "BB Config failed\n");
return rtstatus;
}
@@ -1013,8 +1008,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
else
rtl_write_byte(rtlpriv, RF_CTRL, 0x07);
- if (rtl92s_phy_rf_config(hw) != true) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("RF Config failed\n"));
+ if (!rtl92s_phy_rf_config(hw)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RF Config failed\n");
return rtstatus;
}
@@ -1110,7 +1105,7 @@ void rtl92se_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
if (check_bssid) {
reg_rcr |= (RCR_CBSSID);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
- } else if (check_bssid == false) {
+ } else if (!check_bssid) {
reg_rcr &= (~RCR_CBSSID);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
}
@@ -1129,26 +1124,26 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
case NL80211_IFTYPE_UNSPECIFIED:
bt_msr |= (MSR_LINK_NONE << MSR_LINK_SHIFT);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Set Network type to NO LINK!\n"));
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
bt_msr |= (MSR_LINK_ADHOC << MSR_LINK_SHIFT);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Set Network type to Ad Hoc!\n"));
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
bt_msr |= (MSR_LINK_MANAGED << MSR_LINK_SHIFT);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Set Network type to STA!\n"));
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
bt_msr |= (MSR_LINK_MASTER << MSR_LINK_SHIFT);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- ("Set Network type to AP!\n"));
+ "Set Network type to AP!\n");
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Network type %d not support!\n", type));
+ "Network type %d not supported!\n", type);
return 1;
break;
@@ -1202,7 +1197,7 @@ void rtl92se_set_qos(struct ieee80211_hw *hw, int aci)
rtl_write_dword(rtlpriv, EDCAPARA_VO, 0x2f3222);
break;
default:
- RT_ASSERT(false, ("invalid aci: %d !\n", aci));
+ RT_ASSERT(false, "invalid aci: %d !\n", aci);
break;
}
}
@@ -1219,9 +1214,14 @@ void rtl92se_enable_interrupt(struct ieee80211_hw *hw)
void rtl92se_disable_interrupt(struct ieee80211_hw *hw)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv;
+ struct rtl_pci *rtlpci;
+ rtlpriv = rtl_priv(hw);
+ /* if firmware not available, no interrupts */
+ if (!rtlpriv || !rtlpriv->max_fw_size)
+ return;
+ rtlpci = rtl_pcidev(rtl_pcipriv(hw));
rtl_write_dword(rtlpriv, INTA_MASK, 0);
rtl_write_dword(rtlpriv, INTA_MASK + 4, 0);
@@ -1583,8 +1583,8 @@ void rtl92se_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- ("add_msr:%x, rm_msr:%x\n", add_msr, rm_msr));
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n",
+ add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
@@ -1627,7 +1627,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
if (rtlefuse->epromtype == EEPROM_93C46) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("RTL819X Not boot from eeprom, check it !!"));
+ "RTL819X Not boot from eeprom, check it !!\n");
} else if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
rtl_efuse_shadow_map_update(hw);
@@ -1636,16 +1636,16 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
HWSET_MAX_SIZE_92S);
}
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"),
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP",
hwinfo, HWSET_MAX_SIZE_92S);
eeprom_id = *((u16 *)&hwinfo[0]);
if (eeprom_id != RTL8190_EEPROM_ID) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
+ "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
rtlefuse->autoload_failflag = true;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
}
@@ -1663,15 +1663,15 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("EEPROMId = 0x%4x\n", eeprom_id));
+ "EEPROMId = 0x%4x\n", eeprom_id);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid));
+ "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did));
+ "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid));
+ "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid));
+ "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
for (i = 0; i < 6; i += 2) {
usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
@@ -1681,8 +1681,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
for (i = 0; i < 6; i++)
rtl_write_byte(rtlpriv, MACIDR0 + i, rtlefuse->dev_addr[i]);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("%pM\n", rtlefuse->dev_addr));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
/* Get Tx Power Level by Channel */
/* Read Tx power of Channel 1 ~ 14 from EEPROM. */
@@ -1707,23 +1706,24 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
for (rf_path = 0; rf_path < 2; rf_path++)
for (i = 0; i < 3; i++)
RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
- ("RF(%d) EEPROM CCK Area(%d) = 0x%x\n", rf_path,
- i, rtlefuse->eeprom_chnlarea_txpwr_cck
- [rf_path][i]));
+ "RF(%d) EEPROM CCK Area(%d) = 0x%x\n",
+ rf_path, i,
+ rtlefuse->eeprom_chnlarea_txpwr_cck
+ [rf_path][i]);
for (rf_path = 0; rf_path < 2; rf_path++)
for (i = 0; i < 3; i++)
RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
- ("RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
- rf_path, i,
- rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
- [rf_path][i]));
+ "RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
+ rf_path, i,
+ rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
+ [rf_path][i]);
for (rf_path = 0; rf_path < 2; rf_path++)
for (i = 0; i < 3; i++)
RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
- ("RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
- rf_path, i,
- rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif
- [rf_path][i]));
+ "RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
+ rf_path, i,
+ rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif
+ [rf_path][i]);
for (rf_path = 0; rf_path < 2; rf_path++) {
@@ -1754,11 +1754,11 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
for (i = 0; i < 14; i++) {
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = "
- "[0x%x / 0x%x / 0x%x]\n", rf_path, i,
- rtlefuse->txpwrlevel_cck[rf_path][i],
- rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
- rtlefuse->txpwrlevel_ht40_2s[rf_path][i]));
+ "RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = [0x%x / 0x%x / 0x%x]\n",
+ rf_path, i,
+ rtlefuse->txpwrlevel_cck[rf_path][i],
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][i]);
}
}
@@ -1791,13 +1791,13 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
0xf0) >> 4);
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-%d pwrgroup_ht20[%d] = 0x%x\n",
- rf_path, i,
- rtlefuse->pwrgroup_ht20[rf_path][i]));
+ "RF-%d pwrgroup_ht20[%d] = 0x%x\n",
+ rf_path, i,
+ rtlefuse->pwrgroup_ht20[rf_path][i]);
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-%d pwrgroup_ht40[%d] = 0x%x\n",
- rf_path, i,
- rtlefuse->pwrgroup_ht40[rf_path][i]));
+ "RF-%d pwrgroup_ht40[%d] = 0x%x\n",
+ rf_path, i,
+ rtlefuse->pwrgroup_ht40[rf_path][i]);
}
}
@@ -1852,27 +1852,27 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
(hwinfo[EEPROM_REGULATORY] & 0x1);
}
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory));
+ "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
for (i = 0; i < 14; i++)
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i,
- rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]));
+ "RF-A Ht20 to HT40 Diff[%d] = 0x%x\n",
+ i, rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]);
for (i = 0; i < 14; i++)
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i,
- rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]));
+ "RF-A Legacy to Ht40 Diff[%d] = 0x%x\n",
+ i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]);
for (i = 0; i < 14; i++)
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i,
- rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]));
+ "RF-B Ht20 to HT40 Diff[%d] = 0x%x\n",
+ i, rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]);
for (i = 0; i < 14; i++)
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
- ("RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i,
- rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]));
+ "RF-B Legacy to HT40 Diff[%d] = 0x%x\n",
+ i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]);
- RTPRINT(rtlpriv, FINIT, INIT_TxPower, ("TxPwrSafetyFlag = %d\n",
- rtlefuse->txpwr_safetyflag));
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ "TxPwrSafetyFlag = %d\n", rtlefuse->txpwr_safetyflag);
/* Read RF-indication and Tx Power gain
* index diff of legacy to HT OFDM rate. */
@@ -1881,8 +1881,8 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->legacy_httxpowerdiff =
rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][0];
- RTPRINT(rtlpriv, FINIT, INIT_TxPower, ("TxPowerDiff = %#x\n",
- rtlefuse->eeprom_txpowerdiff));
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ "TxPowerDiff = %#x\n", rtlefuse->eeprom_txpowerdiff);
/* Get TSSI value for each path. */
usvalue = *(u16 *)&hwinfo[EEPROM_TSSI_A];
@@ -1890,16 +1890,16 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
usvalue = *(u8 *)&hwinfo[EEPROM_TSSI_B];
rtlefuse->eeprom_tssi[RF90_PATH_B] = (u8)(usvalue & 0xff);
- RTPRINT(rtlpriv, FINIT, INIT_TxPower, ("TSSI_A = 0x%x, TSSI_B = 0x%x\n",
- rtlefuse->eeprom_tssi[RF90_PATH_A],
- rtlefuse->eeprom_tssi[RF90_PATH_B]));
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower, "TSSI_A = 0x%x, TSSI_B = 0x%x\n",
+ rtlefuse->eeprom_tssi[RF90_PATH_A],
+ rtlefuse->eeprom_tssi[RF90_PATH_B]);
/* Read antenna tx power offset of B/C/D to A from EEPROM */
/* and read ThermalMeter from EEPROM */
tempval = *(u8 *)&hwinfo[EEPROM_THERMALMETER];
rtlefuse->eeprom_thermalmeter = tempval;
- RTPRINT(rtlpriv, FINIT, INIT_TxPower, ("thermalmeter = 0x%x\n",
- rtlefuse->eeprom_thermalmeter));
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
/* ThermalMeter, BIT(0)~3 for RFIC1, BIT(4)~7 for RFIC2 */
rtlefuse->thermalmeter[0] = (rtlefuse->eeprom_thermalmeter & 0x1f);
@@ -1915,8 +1915,8 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
/* Version ID, Channel plan */
rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
rtlefuse->txpwr_fromeprom = true;
- RTPRINT(rtlpriv, FINIT, INIT_TxPower, ("EEPROM ChannelPlan = 0x%4x\n",
- rtlefuse->eeprom_channelplan));
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ "EEPROM ChannelPlan = 0x%4x\n", rtlefuse->eeprom_channelplan);
/* Read Customer ID or Board Type!!! */
tempval = *(u8 *)&hwinfo[EEPROM_BOARDTYPE];
@@ -1937,14 +1937,14 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
if (!(tempval & BIT(0))) {
rtlefuse->b1x1_recvcombine = true;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("RF_TYPE=1T2R but only 1SS\n"));
+ "RF_TYPE=1T2R but only 1SS\n");
}
}
rtlefuse->b1ss_support = rtlefuse->b1x1_recvcombine;
rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMID];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("EEPROM Customer ID: 0x%2x",
- rtlefuse->eeprom_oemid));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x",
+ rtlefuse->eeprom_oemid);
/* set channel paln to world wide 13 */
rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
@@ -1959,19 +1959,19 @@ void rtl92se_read_eeprom_info(struct ieee80211_hw *hw)
tmp_u1b = rtl_read_byte(rtlpriv, EPROM_CMD);
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from EEPROM\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from EFUSE\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl92se_read_adapter_info(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Autoload ERR!!\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
rtlefuse->autoload_failflag = true;
}
}
@@ -2071,8 +2071,8 @@ static void rtl92se_update_hal_rate_table(struct ieee80211_hw *hw,
else
rtl92s_phy_set_fw_cmd(hw, FW_CMD_RA_REFRESH_BG);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- ("%x\n", rtl_read_dword(rtlpriv, ARFR0)));
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
+ rtl_read_dword(rtlpriv, ARFR0));
}
static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
@@ -2224,8 +2224,8 @@ static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
mask |= (bmulticast ? 1 : 0) << 9 | (macid & 0x1f) << 4 | (band & 0xf);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_TRACE, ("mask = %x, bitmap = %x\n",
- mask, ratr_bitmap));
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_TRACE, "mask = %x, bitmap = %x\n",
+ mask, ratr_bitmap);
rtl_write_dword(rtlpriv, 0x2c4, ratr_bitmap);
rtl_write_dword(rtlpriv, WFM5, (FW_RA_UPDATE_MASK | (mask << 8)));
@@ -2301,14 +2301,14 @@ bool rtl92se_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
if ((ppsc->hwradiooff) && (rfpwr_toset == ERFON)) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("RFKILL-HW Radio ON, RF ON\n"));
+ "RFKILL-HW Radio ON, RF ON\n");
rfpwr_toset = ERFON;
ppsc->hwradiooff = false;
actuallyset = true;
- } else if ((ppsc->hwradiooff == false) && (rfpwr_toset == ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("RFKILL-HW Radio OFF, RF OFF\n"));
+ } else if ((!ppsc->hwradiooff) && (rfpwr_toset == ERFOFF)) {
+ RT_TRACE(rtlpriv, COMP_RF,
+ DBG_DMESG, "RFKILL-HW Radio OFF, RF OFF\n");
rfpwr_toset = ERFOFF;
ppsc->hwradiooff = true;
@@ -2372,7 +2372,7 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("clear_all\n"));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
@@ -2401,7 +2401,7 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
enc_algo = CAM_TKIP;
break;
}
@@ -2419,9 +2419,8 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
p_macaddr);
if (entry_id >= TOTAL_CAM_ENTRY) {
RT_TRACE(rtlpriv,
- COMP_SEC, DBG_EMERG,
- ("Can not find free hw"
- " security cam entry\n"));
+ COMP_SEC, DBG_EMERG,
+ "Can not find free hw security cam entry\n");
return;
}
} else {
@@ -2435,30 +2434,31 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
if (rtlpriv->sec.key_len[key_index] == 0) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("delete one entry, entry_id is %d\n",
- entry_id));
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("The insert KEY length is %d\n",
- rtlpriv->sec.key_len[PAIRWISE_KEYIDX]));
+ "The insert KEY length is %d\n",
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- ("The insert KEY is %x %x\n",
- rtlpriv->sec.key_buf[0][0],
- rtlpriv->sec.key_buf[0][1]));
+ "The insert KEY is %x %x\n",
+ rtlpriv->sec.key_buf[0][0],
+ rtlpriv->sec.key_buf[0][1]);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("add one entry\n"));
+ "add one entry\n");
if (is_pairwise) {
RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
- "Pairwiase Key content :",
- rtlpriv->sec.pairwise_key,
- rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
+ "Pairwise Key content",
+ rtlpriv->sec.pairwise_key,
+ rtlpriv->sec.
+ key_len[PAIRWISE_KEYIDX]);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("set Pairwiase key\n"));
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
@@ -2466,7 +2466,7 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
rtlpriv->sec.key_buf[key_index]);
} else {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- ("set group key\n"));
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
index 6160a9bfe98a..1886c2644a26 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/rtlwifi/rtl8192se/led.c
index e3fe7c90ebf4..44949b5cbb87 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/led.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -52,8 +52,8 @@ void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- ("LedAddr:%X ledpin=%d\n", LEDCFG, pled->ledpin));
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ LEDCFG, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, LEDCFG);
@@ -68,7 +68,7 @@ void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
pled->ledon = true;
@@ -76,12 +76,15 @@ void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_priv *rtlpriv;
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- ("LedAddr:%X ledpin=%d\n", LEDCFG, pled->ledpin));
+ rtlpriv = rtl_priv(hw);
+ if (!rtlpriv || rtlpriv->max_fw_size)
+ return;
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ LEDCFG, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, LEDCFG);
@@ -101,7 +104,7 @@ void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
pled->ledon = false;
@@ -141,8 +144,7 @@ void rtl92se_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d,\n",
- ledaction));
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n", ledaction);
_rtl92se_sw_led_control(hw, ledaction);
}
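
The rtl92se_sw_led_off hunk above (like the rtl92se_disable_interrupt hunk in hw.c) adds an early return so the function bails out before touching hardware registers when the private data is not in a usable state. The sketch below only shows the shape of that guard clause; the structure, the fw_loaded field and the exact condition are invented and are not the driver's actual test.

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

struct toy_priv {
	bool fw_loaded;		/* stands in for the driver's readiness state */
};

static void toy_write_reg(struct toy_priv *priv, unsigned int reg,
			  unsigned int val)
{
	(void)priv;
	printf("write reg %#x = %#x\n", reg, val);
}

static void toy_led_off(struct toy_priv *priv)
{
	/* Guard clause: without usable state, skip the register access. */
	if (!priv || !priv->fw_loaded)
		return;

	toy_write_reg(priv, 0x4c, 0x0);
}

int main(void)
{
	struct toy_priv ready = { .fw_loaded = true };

	toy_led_off(NULL);	/* silently ignored */
	toy_led_off(&ready);	/* performs the register write */
	return 0;
}
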
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/led.h b/drivers/net/wireless/rtlwifi/rtl8192se/led.h
index 8cce3870af3c..2182dbeb5f32 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/led.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index f10ac1ad9087..4a499928e4c6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,8 +27,6 @@
*
*****************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include "../wifi.h"
#include "../pci.h"
#include "../ps.h"
@@ -58,16 +56,15 @@ u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 returnvalue = 0, originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x)\n",
- regaddr, bitmask));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
+ regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- ("BBR MASK=0x%x Addr[0x%x]=0x%x\n",
- bitmask, regaddr, originalvalue));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+ bitmask, regaddr, originalvalue);
return returnvalue;
@@ -79,8 +76,9 @@ void rtl92s_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
- " data(%#x)\n", regaddr, bitmask, data));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
@@ -90,8 +88,9 @@ void rtl92s_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
- " data(%#x)\n", regaddr, bitmask, data));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
@@ -149,8 +148,8 @@ static u32 _rtl92s_phy_rf_serial_read(struct ieee80211_hw *hw,
retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
BLSSI_READBACK_DATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rflssi_readback, retvalue));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rflssi_readback, retvalue);
return retvalue;
@@ -172,8 +171,8 @@ static void _rtl92s_phy_rf_serial_write(struct ieee80211_hw *hw,
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset, data_and_addr));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset, data_and_addr);
}
@@ -183,8 +182,9 @@ u32 rtl92s_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), rfpath(%#x), "
- "bitmask(%#x)\n", regaddr, rfpath, bitmask));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -195,9 +195,9 @@ u32 rtl92s_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), rfpath(%#x), "
- "bitmask(%#x), original_value(%#x)\n", regaddr, rfpath,
- bitmask, original_value));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -212,8 +212,9 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
if (!((rtlphy->rf_pathmap >> rfpath) & 0x1))
return;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
- " data(%#x), rfpath(%#x)\n", regaddr, bitmask, data, rfpath));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -228,8 +229,9 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x), "
- "data(%#x), rfpath(%#x)\n", regaddr, bitmask, data, rfpath));
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
}
@@ -249,7 +251,7 @@ void rtl92s_phy_scan_operation_backup(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Unknown operation.\n"));
+ "Unknown operation\n");
break;
}
}
@@ -264,9 +266,9 @@ void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u8 reg_bw_opmode;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz"));
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (rtlphy->set_bwmode_inprogress)
return;
@@ -290,8 +292,7 @@ void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("unknown bandwidth: %#X\n",
- rtlphy->current_chan_bw));
+ "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
break;
}
@@ -316,13 +317,13 @@ void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
break;
}
rtl92s_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
}
static bool _rtl92s_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
@@ -332,7 +333,7 @@ static bool _rtl92s_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
struct swchnlcmd *pcmd;
if (cmdtable == NULL) {
- RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
+ RT_ASSERT(false, "cmdtable cannot be NULL\n");
return false;
}
@@ -377,7 +378,7 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
rfdependcmdcnt = 0;
RT_ASSERT((channel >= 1 && channel <= 14),
- ("illegal channel for Zebra: %d\n", channel));
+ "invalid channel for Zebra: %d\n", channel);
_rtl92s_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
@@ -438,7 +439,7 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
break;
}
@@ -458,9 +459,8 @@ u8 rtl92s_phy_sw_chnl(struct ieee80211_hw *hw)
u32 delay;
bool ret;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- ("switch to channel%d\n",
- rtlphy->current_channel));
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "switch to channel%d\n",
+ rtlphy->current_channel);
if (rtlphy->sw_chnl_inprogress)
return 0;
@@ -496,7 +496,7 @@ u8 rtl92s_phy_sw_chnl(struct ieee80211_hw *hw)
rtlphy->sw_chnl_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
return 1;
}
@@ -556,20 +556,19 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
do {
InitializeCount++;
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("IPS Set eRf nic enable\n"));
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
- } while ((rtstatus != true) &&
- (InitializeCount < 10));
+ } while (!rtstatus && (InitializeCount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- ("awake, sleeped:%d ms "
- "state_inap:%x\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_sleep_jiffies),
- rtlpriv->psc.state_inap));
+ "awake, sleeped:%d ms state_inap:%x\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->
+ last_sleep_jiffies),
+ rtlpriv->psc.state_inap);
ppsc->last_awake_jiffies = jiffies;
rtl_write_word(rtlpriv, CMDR, 0x37FC);
rtl_write_byte(rtlpriv, TXPAUSE, 0x00);
@@ -587,7 +586,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
case ERFOFF:{
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- ("IPS Set eRf nic disable\n"));
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -613,11 +612,9 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
continue;
} else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("eRf Off/Sleep: "
- "%d times TcbBusyQueue[%d] = "
- "%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue)));
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] = %d before doze!\n",
+ i + 1, queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
@@ -625,31 +622,30 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("\nERFOFF: %d times"
- "TcbBusyQueue[%d] = %d !\n",
+ "ERFOFF: %d times TcbBusyQueue[%d] = %d !\n",
MAX_DOZE_WAITING_TIMES_9x,
queue_id,
- skb_queue_len(&ring->queue)));
+ skb_queue_len(&ring->queue));
break;
}
}
RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- ("Set ERFSLEEP awaked:%d ms\n",
+ "Set ERFSLEEP awaked:%d ms\n",
jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies)));
+ ppsc->last_awake_jiffies));
RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- ("sleep awaked:%d ms "
- "state_inap:%x\n", jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies),
- rtlpriv->psc.state_inap));
+ "sleep awaked:%d ms state_inap:%x\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies),
+ rtlpriv->psc.state_inap);
ppsc->last_sleep_jiffies = jiffies;
_rtl92se_phy_set_rf_sleep(hw);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("switch case not process\n"));
+ "switch case not processed\n");
bresult = false;
break;
}
@@ -681,30 +677,28 @@ static void _rtl92s_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ int index;
if (reg_addr == RTXAGC_RATE18_06)
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] =
- data;
- if (reg_addr == RTXAGC_RATE54_24)
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] =
- data;
- if (reg_addr == RTXAGC_CCK_MCS32)
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] =
- data;
- if (reg_addr == RTXAGC_MCS03_MCS00)
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] =
- data;
- if (reg_addr == RTXAGC_MCS07_MCS04)
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] =
- data;
- if (reg_addr == RTXAGC_MCS11_MCS08)
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] =
- data;
- if (reg_addr == RTXAGC_MCS15_MCS12) {
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] =
- data;
+ index = 0;
+ else if (reg_addr == RTXAGC_RATE54_24)
+ index = 1;
+ else if (reg_addr == RTXAGC_CCK_MCS32)
+ index = 6;
+ else if (reg_addr == RTXAGC_MCS03_MCS00)
+ index = 2;
+ else if (reg_addr == RTXAGC_MCS07_MCS04)
+ index = 3;
+ else if (reg_addr == RTXAGC_MCS11_MCS08)
+ index = 4;
+ else if (reg_addr == RTXAGC_MCS15_MCS12)
+ index = 5;
+ else
+ return;
+
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][index] = data;
+ if (index == 5)
rtlphy->pwrgroup_cnt++;
- }
}
static void _rtl92s_phy_init_register_definition(struct ieee80211_hw *hw)
@@ -993,9 +987,9 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw)
rtstatus = false;
}
- if (rtstatus != true) {
+ if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- ("Write BB Reg Fail!!"));
+ "Write BB Reg Fail!!\n");
goto phy_BB8190_Config_ParaFile_Fail;
}
@@ -1007,17 +1001,16 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw)
rtstatus = _rtl92s_phy_config_bb_with_pg(hw,
BASEBAND_CONFIG_PHY_REG);
}
- if (rtstatus != true) {
+ if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- ("_rtl92s_phy_bb_config_parafile(): "
- "BB_PG Reg Fail!!"));
+ "_rtl92s_phy_bb_config_parafile(): BB_PG Reg Fail!!\n");
goto phy_BB8190_Config_ParaFile_Fail;
}
/* 3. BB AGC table Initialization */
rtstatus = _rtl92s_phy_config_bb(hw, BASEBAND_CONFIG_AGC_TAB);
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("%s(): AGC Table Fail\n", __func__);
goto phy_BB8190_Config_ParaFile_Fail;
}
@@ -1053,7 +1046,7 @@ u8 rtl92s_phy_config_rf(struct ieee80211_hw *hw, enum radio_path rfpath)
radio_b_tblen = RADIOB_ARRAYLENGTH;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Radio No %x\n", rfpath));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
rtstatus = true;
switch (rfpath) {
@@ -1175,11 +1168,11 @@ bool rtl92s_phy_bb_config(struct ieee80211_hw *hw)
(rtlphy->rf_type == RF_2T2R && rf_num != 2) ||
(rtlphy->rf_type == RF_2T2R_GREEN && rf_num != 2)) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- ("RF_Type(%x) does not match "
- "RF_Num(%x)!!\n", rtlphy->rf_type, rf_num));
+ "RF_Type(%x) does not match RF_Num(%x)!!\n",
+ rtlphy->rf_type, rf_num);
RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- ("path1 0x%x, path2 0x%x, pathmap "
- "0x%x\n", path1, path2, pathmap));
+ "path1 0x%x, path2 0x%x, pathmap 0x%x\n",
+ path1, path2, pathmap);
}
return rtstatus;
@@ -1214,20 +1207,20 @@ void rtl92s_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
ROFDM0_XCAGCCORE1, MASKBYTE0);
rtlphy->default_initialgain[3] = rtl_get_bbreg(hw,
ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Default initial gain "
- "(c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
rtlphy->default_initialgain[0],
rtlphy->default_initialgain[1],
rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]));
+ rtlphy->default_initialgain[3]);
/* read framesync */
rtlphy->framesync = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3, MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
MASKDWORD);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- ("Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync));
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
@@ -1274,7 +1267,7 @@ void rtl92s_phy_set_txpower(struct ieee80211_hw *hw, u8 channel)
/* [0]:RF-A, [1]:RF-B */
u8 cckpowerlevel[2], ofdmpowerLevel[2];
- if (rtlefuse->txpwr_fromeprom == false)
+ if (!rtlefuse->txpwr_fromeprom)
return;
/* Mainly we use RF-A Tx Power to write the Tx Power registers,
@@ -1287,10 +1280,9 @@ void rtl92s_phy_set_txpower(struct ieee80211_hw *hw, u8 channel)
&ofdmpowerLevel[0]);
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("Channel-%d, cckPowerLevel (A / B) = "
- "0x%x / 0x%x, ofdmPowerLevel (A / B) = 0x%x / 0x%x\n",
- channel, cckpowerlevel[0], cckpowerlevel[1],
- ofdmpowerLevel[0], ofdmpowerLevel[1]));
+ "Channel-%d, cckPowerLevel (A / B) = 0x%x / 0x%x, ofdmPowerLevel (A / B) = 0x%x / 0x%x\n",
+ channel, cckpowerlevel[0], cckpowerlevel[1],
+ ofdmpowerLevel[0], ofdmpowerLevel[1]);
_rtl92s_phy_ccxpower_indexcheck(hw, channel, &cckpowerlevel[0],
&ofdmpowerLevel[0]);
@@ -1316,7 +1308,7 @@ void rtl92s_phy_chk_fwcmd_iodone(struct ieee80211_hw *hw)
} while (--pollingcnt);
if (pollingcnt == 0)
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Set FW Cmd fail!!\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Set FW Cmd fail!!\n");
}
@@ -1345,20 +1337,17 @@ static void _rtl92s_phy_set_fwcmd_io(struct ieee80211_hw *hw)
switch (rtlhal->current_fwcmd_io) {
case FW_CMD_RA_RESET:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- ("FW_CMD_RA_RESET\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_RESET\n");
rtl_write_dword(rtlpriv, WFM5, FW_RA_RESET);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_RA_ACTIVE:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- ("FW_CMD_RA_ACTIVE\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_ACTIVE\n");
rtl_write_dword(rtlpriv, WFM5, FW_RA_ACTIVE);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_RA_REFRESH_N:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- ("FW_CMD_RA_REFRESH_N\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_REFRESH_N\n");
input = FW_RA_REFRESH;
rtl_write_dword(rtlpriv, WFM5, input);
rtl92s_phy_chk_fwcmd_iodone(hw);
@@ -1367,7 +1356,7 @@ static void _rtl92s_phy_set_fwcmd_io(struct ieee80211_hw *hw)
break;
case FW_CMD_RA_REFRESH_BG:
RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- ("FW_CMD_RA_REFRESH_BG\n"));
+ "FW_CMD_RA_REFRESH_BG\n");
rtl_write_dword(rtlpriv, WFM5, FW_RA_REFRESH);
rtl92s_phy_chk_fwcmd_iodone(hw);
rtl_write_dword(rtlpriv, WFM5, FW_RA_DISABLE_RSSI_MASK);
@@ -1375,21 +1364,20 @@ static void _rtl92s_phy_set_fwcmd_io(struct ieee80211_hw *hw)
break;
case FW_CMD_RA_REFRESH_N_COMB:
RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- ("FW_CMD_RA_REFRESH_N_COMB\n"));
+ "FW_CMD_RA_REFRESH_N_COMB\n");
input = FW_RA_IOT_N_COMB;
rtl_write_dword(rtlpriv, WFM5, input);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_RA_REFRESH_BG_COMB:
RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- ("FW_CMD_RA_REFRESH_BG_COMB\n"));
+ "FW_CMD_RA_REFRESH_BG_COMB\n");
input = FW_RA_IOT_BG_COMB;
rtl_write_dword(rtlpriv, WFM5, input);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_IQK_ENABLE:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- ("FW_CMD_IQK_ENABLE\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_IQK_ENABLE\n");
rtl_write_dword(rtlpriv, WFM5, FW_IQK_ENABLE);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
@@ -1424,8 +1412,7 @@ static void _rtl92s_phy_set_fwcmd_io(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
break;
case FW_CMD_LPS_ENTER:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- ("FW_CMD_LPS_ENTER\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_LPS_ENTER\n");
current_aid = rtlpriv->mac80211.assoc_id;
rtl_write_dword(rtlpriv, WFM5, (FW_LPS_ENTER |
((current_aid | 0xc000) << 8)));
@@ -1434,20 +1421,18 @@ static void _rtl92s_phy_set_fwcmd_io(struct ieee80211_hw *hw)
* turbo mode until driver leave LPS */
break;
case FW_CMD_LPS_LEAVE:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- ("FW_CMD_LPS_LEAVE\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_LPS_LEAVE\n");
rtl_write_dword(rtlpriv, WFM5, FW_LPS_LEAVE);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_ADD_A2_ENTRY:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- ("FW_CMD_ADD_A2_ENTRY\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_ADD_A2_ENTRY\n");
rtl_write_dword(rtlpriv, WFM5, FW_ADD_A2_ENTRY);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_CTRL_DM_BY_DRIVER:
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("FW_CMD_CTRL_DM_BY_DRIVER\n"));
+ "FW_CMD_CTRL_DM_BY_DRIVER\n");
rtl_write_dword(rtlpriv, WFM5, FW_CTRL_DM_BY_DRIVER);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
@@ -1472,8 +1457,8 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
bool bPostProcessing = false;
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("Set FW Cmd(%#x), set_fwcmd_inprogress(%d)\n",
- fw_cmdio, rtlhal->set_fwcmd_inprogress));
+ "Set FW Cmd(%#x), set_fwcmd_inprogress(%d)\n",
+ fw_cmdio, rtlhal->set_fwcmd_inprogress);
do {
/* We re-map to combined FW CMD ones if firmware version */
@@ -1501,7 +1486,7 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
* DM map table in the future. */
switch (fw_cmdio) {
case FW_CMD_RA_INIT:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("RA init!!\n"));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "RA init!!\n");
fw_cmdmap |= FW_RA_INIT_CTL;
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
/* Clear control flag to sync with FW. */
@@ -1509,7 +1494,7 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
break;
case FW_CMD_DIG_DISABLE:
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("Set DIG disable!!\n"));
+ "Set DIG disable!!\n");
fw_cmdmap &= ~FW_DIG_ENABLE_CTL;
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
break;
@@ -1517,14 +1502,14 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
case FW_CMD_DIG_RESUME:
if (!(rtlpriv->dm.dm_flag & HAL_DM_DIG_DISABLE)) {
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("Set DIG enable or resume!!\n"));
+ "Set DIG enable or resume!!\n");
fw_cmdmap |= (FW_DIG_ENABLE_CTL | FW_SS_CTL);
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
}
break;
case FW_CMD_DIG_HALT:
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("Set DIG halt!!\n"));
+ "Set DIG halt!!\n");
fw_cmdmap &= ~(FW_DIG_ENABLE_CTL | FW_SS_CTL);
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
break;
@@ -1540,9 +1525,8 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
(rtlefuse->thermalmeter[0] << 16));
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("Set TxPwr tracking!! "
- "FwCmdMap(%#x), FwParam(%#x)\n",
- fw_cmdmap, fw_param));
+ "Set TxPwr tracking!! FwCmdMap(%#x), FwParam(%#x)\n",
+ fw_cmdmap, fw_param);
FW_CMD_PARA_SET(rtlpriv, fw_param);
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
@@ -1563,9 +1547,8 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
fw_param &= FW_RA_PARAM_CLR;
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("[FW CMD] [New Version] "
- "Set RA/IOT Comb in n mode!! FwCmdMap(%#x), "
- "FwParam(%#x)\n", fw_cmdmap, fw_param));
+ "[FW CMD] [New Version] Set RA/IOT Comb in n mode!! FwCmdMap(%#x), FwParam(%#x)\n",
+ fw_cmdmap, fw_param);
FW_CMD_PARA_SET(rtlpriv, fw_param);
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
@@ -1635,7 +1618,7 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
break;
case FW_CMD_HIGH_PWR_ENABLE:
if (!(rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) &&
- (rtlpriv->dm.dynamic_txpower_enable != true)) {
+ !rtlpriv->dm.dynamic_txpower_enable) {
fw_cmdmap |= (FW_HIGH_PWR_ENABLE_CTL |
FW_SS_CTL);
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
@@ -1652,7 +1635,7 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
break;
case FW_CMD_PAPE_CONTROL:
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- ("[FW CMD] Set PAPE Control\n"));
+ "[FW CMD] Set PAPE Control\n");
fw_cmdmap &= ~FW_PAPE_CTL_BY_SW_HW;
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
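
Besides the trace-call conversions, the phy.c diff above rewrites _rtl92s_store_pwrindex_diffrate_offset: instead of repeating the table assignment in every if branch, the register address is first mapped to a single table index, then one assignment stores the value and the group counter is advanced when the last entry of a group arrives. The sketch below mirrors that shape with invented register values and table dimensions.

#include <stdio.h>
#include <stdint.h>

#define TOY_REG_RATE18_06	0xe00
#define TOY_REG_RATE54_24	0xe04
#define TOY_REG_MCS15_MCS12	0xe1c

static uint32_t toy_offset[4][8];
static unsigned int toy_group;

static void toy_store_offset(uint32_t reg_addr, uint32_t data)
{
	int index;

	if (reg_addr == TOY_REG_RATE18_06)
		index = 0;
	else if (reg_addr == TOY_REG_RATE54_24)
		index = 1;
	else if (reg_addr == TOY_REG_MCS15_MCS12)
		index = 5;
	else
		return;		/* unrelated register: nothing to record */

	toy_offset[toy_group][index] = data;
	if (index == 5)		/* last entry closes the current group */
		toy_group++;
}

int main(void)
{
	toy_store_offset(TOY_REG_RATE18_06, 0x02040608);
	toy_store_offset(TOY_REG_MCS15_MCS12, 0x0a0c0e10);
	printf("group advanced to %u, [0][0] = %#x\n",
	       toy_group, (unsigned int)toy_offset[0][0]);
	return 0;
}
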
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.h b/drivers/net/wireless/rtlwifi/rtl8192se/phy.h
index 37e504af6446..ac0387770630 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
index 11f125c030ce..84d1181795b8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
index 0ad50fe44aa2..08c2f5625129 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,8 +27,6 @@
*
*****************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include "../wifi.h"
#include "reg.h"
#include "def.h"
@@ -123,13 +121,13 @@ static void _rtl92s_get_powerbase(struct ieee80211_hw *hw, u8 *p_pwrlevel,
}
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("40MHz finalpwr_idx "
- "(A / B) = 0x%x / 0x%x\n", p_final_pwridx[0],
- p_final_pwridx[1]));
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ "40MHz finalpwr_idx (A / B) = 0x%x / 0x%x\n",
+ p_final_pwridx[0], p_final_pwridx[1]);
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("20MHz finalpwr_idx "
- "(A / B) = 0x%x / 0x%x\n", p_final_pwridx[0],
- p_final_pwridx[1]));
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ "20MHz finalpwr_idx (A / B) = 0x%x / 0x%x\n",
+ p_final_pwridx[0], p_final_pwridx[1]);
}
}
@@ -153,9 +151,8 @@ static void _rtl92s_set_antennadiff(struct ieee80211_hw *hw,
ant_pwr_diff = -8;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("Antenna Diff from RF-B "
- "to RF-A = %d (0x%x)\n", ant_pwr_diff,
- ant_pwr_diff & 0xf));
+ "Antenna Diff from RF-B to RF-A = %d (0x%x)\n",
+ ant_pwr_diff, ant_pwr_diff & 0xf);
ant_pwr_diff &= 0xf;
}
@@ -172,9 +169,8 @@ static void _rtl92s_set_antennadiff(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, RFPGA0_TXGAINSTAGE, (BXBTXAGC | BXCTXAGC | BXDTXAGC),
u4reg_val);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("Write BCD-Diff(0x%x) = 0x%x\n",
- RFPGA0_TXGAINSTAGE, u4reg_val));
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Write BCD-Diff(0x%x) = 0x%x\n",
+ RFPGA0_TXGAINSTAGE, u4reg_val);
}
static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
@@ -201,8 +197,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
((index < 2) ? pwrbase0 : pwrbase1);
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("RTK better performance, "
- "writeval = 0x%x\n", writeval));
+ "RTK better performance, writeval = 0x%x\n", writeval);
break;
case 1:
/* Realtek regulatory increase power diff defined
@@ -211,8 +206,8 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
writeval = ((index < 2) ? pwrbase0 : pwrbase1);
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("Realtek regulatory, "
- "40MHz, writeval = 0x%x\n", writeval));
+ "Realtek regulatory, 40MHz, writeval = 0x%x\n",
+ writeval);
} else {
if (rtlphy->pwrgroup_cnt == 1)
chnlgroup = 0;
@@ -234,16 +229,15 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
pwrbase0 : pwrbase1);
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("Realtek regulatory, "
- "20MHz, writeval = 0x%x\n", writeval));
+ "Realtek regulatory, 20MHz, writeval = 0x%x\n",
+ writeval);
}
break;
case 2:
/* Better regulatory don't increase any power diff */
writeval = ((index < 2) ? pwrbase0 : pwrbase1);
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("Better regulatory, "
- "writeval = 0x%x\n", writeval));
+ "Better regulatory, writeval = 0x%x\n", writeval);
break;
case 3:
/* Customer defined power diff. increase power diff
@@ -252,14 +246,14 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("customer's limit, 40MHz = 0x%x\n",
- rtlefuse->pwrgroup_ht40
- [RF90_PATH_A][chnl - 1]));
+ "customer's limit, 40MHz = 0x%x\n",
+ rtlefuse->pwrgroup_ht40
+ [RF90_PATH_A][chnl - 1]);
} else {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("customer's limit, 20MHz = 0x%x\n",
- rtlefuse->pwrgroup_ht20
- [RF90_PATH_A][chnl - 1]));
+ "customer's limit, 20MHz = 0x%x\n",
+ rtlefuse->pwrgroup_ht20
+ [RF90_PATH_A][chnl - 1]);
}
for (i = 0; i < 4; i++) {
@@ -293,22 +287,19 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
(pwrdiff_limit[1] << 8) |
(pwrdiff_limit[0]);
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("Customer's limit = 0x%x\n",
- customer_limit));
+ "Customer's limit = 0x%x\n", customer_limit);
writeval = customer_limit + ((index < 2) ?
pwrbase0 : pwrbase1);
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("Customer, writeval = "
- "0x%x\n", writeval));
+ "Customer, writeval = 0x%x\n", writeval);
break;
default:
chnlgroup = 0;
writeval = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index] +
((index < 2) ? pwrbase0 : pwrbase1);
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- ("RTK better performance, "
- "writeval = 0x%x\n", writeval));
+ "RTK better performance, writeval = 0x%x\n", writeval);
break;
}
@@ -508,7 +499,7 @@ bool rtl92s_phy_rf6052_config(struct ieee80211_hw *hw)
break;
}
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("Radio[%d] Fail!!\n", rfpath);
goto fail;
}
@@ -541,8 +532,7 @@ void rtl92s_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("unknown bandwidth: %#X\n",
- bandwidth));
+ "unknown bandwidth: %#X\n", bandwidth);
break;
}
}
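The rf.c hunks above are part of the tree-wide rtlwifi cleanup in this merge: RT_TRACE and RT_ASSERT become genuinely variadic, so callers hand over the format string and its arguments directly instead of wrapping them in an extra pair of parentheses. A minimal standalone C sketch (the DBG_TRACE macro here is illustrative, not the rtlwifi one) of why the doubled parentheses become unnecessary:

#include <stdio.h>

/* A variadic trace macro can forward "fmt", args as-is. */
#define DBG_TRACE(comp, fmt, ...) \
	printf("[%s] " fmt, comp, ##__VA_ARGS__)

int main(void)
{
	int writeval = 0x2c;

	/* old style would have been: DBG_TRACE("POWER", ("writeval = 0x%x\n", writeval)); */
	DBG_TRACE("POWER", "writeval = 0x%x\n", writeval);
	return 0;
}

One practical benefit of the variadic form is that printf-style format checking can see the individual arguments, rather than the whole parenthesized list travelling through the macro as a single opaque argument.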
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.h b/drivers/net/wireless/rtlwifi/rtl8192se/rf.h
index 3843baa1a874..8a29eb94ab17 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 78723cf59491..f1b36005c6a2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,14 +27,11 @@
*
*****************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-
#include "../wifi.h"
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
+#include "../pci.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -45,6 +42,8 @@
#include "trx.h"
#include "led.h"
+#include <linux/module.h>
+
static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -89,12 +88,54 @@ static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = 2;
}
+static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
+{
+ struct ieee80211_hw *hw = context;
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+ struct rt_firmware *pfirmware = NULL;
+ int err;
+
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
+ "Firmware callback routine entered!\n");
+ complete(&rtlpriv->firmware_loading_complete);
+ if (!firmware) {
+ pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
+ rtlpriv->max_fw_size = 0;
+ return;
+ }
+ if (firmware->size > rtlpriv->max_fw_size) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Firmware is too big!\n");
+ rtlpriv->max_fw_size = 0;
+ release_firmware(firmware);
+ return;
+ }
+ pfirmware = (struct rt_firmware *)rtlpriv->rtlhal.pfirmware;
+ memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size);
+ pfirmware->sz_fw_tmpbufferlen = firmware->size;
+ release_firmware(firmware);
+
+ err = ieee80211_register_hw(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Can't register mac80211 hw\n");
+ return;
+ } else {
+ rtlpriv->mac80211.mac80211_registered = 1;
+ }
+ rtlpci->irq_alloc = 1;
+ set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+
+ /*init rfkill */
+ rtl_init_rfkill(hw);
+}
+
static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- const struct firmware *firmware;
- struct rt_firmware *pfirmware = NULL;
int err = 0;
u16 earlyrxthreshold = 7;
@@ -168,9 +209,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
if (!rtlpriv->psc.inactiveps)
- pr_info("rtl8192ce: Power Save off (module option)\n");
+ pr_info("Power Save off (module option)\n");
if (!rtlpriv->psc.fwctrl_lps)
- pr_info("rtl8192ce: FW Power Save off (module option)\n");
+ pr_info("FW Power Save off (module option)\n");
rtlpriv->psc.reg_fwctrl_lps = 3;
rtlpriv->psc.reg_max_lps_awakeintvl = 5;
/* for ASPM, you can close aspm through
@@ -186,33 +227,22 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(sizeof(struct rt_firmware));
- if (!rtlpriv->rtlhal.pfirmware) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Can't alloc buffer for fw.\n"));
+ if (!rtlpriv->rtlhal.pfirmware)
return 1;
- }
+
+ rtlpriv->max_fw_size = RTL8190_MAX_RAW_FIRMWARE_CODE_SIZE;
pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n"
"Loading firmware %s\n", rtlpriv->cfg->fw_name);
/* request fw */
- err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
- rtlpriv->io.dev);
+ err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ rtlpriv->io.dev, GFP_KERNEL, hw,
+ rtl92se_fw_cb);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Failed to request firmware!\n"));
+ "Failed to request firmware!\n");
return 1;
}
- if (firmware->size > sizeof(struct rt_firmware)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Firmware is too big!\n"));
- release_firmware(firmware);
- return 1;
- }
-
- pfirmware = (struct rt_firmware *)rtlpriv->rtlhal.pfirmware;
- memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size);
- pfirmware->sz_fw_tmpbufferlen = firmware->size;
- release_firmware(firmware);
return err;
}
@@ -426,7 +456,7 @@ static int __init rtl92se_module_init(void)
ret = pci_register_driver(&rtl92se_driver);
if (ret)
- RT_ASSERT(false, (": No device found\n"));
+ RT_ASSERT(false, "No device found\n");
return ret;
}
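The sw.c change above replaces the synchronous request_firmware() call in rtl92s_init_sw_vars() with request_firmware_nowait() plus a completion, and moves ieee80211_register_hw() into the new rtl92se_fw_cb() callback so the driver no longer blocks while firmware loads. A hedged kernel-style sketch of that general pattern — sketch_fw_cb, sketch_load, sketch_remove and "sketch_fw.bin" are illustrative names, and this builds only as module code, not as a standalone program:

#include <linux/firmware.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/gfp.h>

static DECLARE_COMPLETION(fw_loaded);

static void sketch_fw_cb(const struct firmware *fw, void *context)
{
	if (fw) {
		/* copy fw->data somewhere persistent before releasing it */
		release_firmware(fw);
	}
	complete(&fw_loaded);		/* unblock any waiting teardown path */
}

static int sketch_load(struct device *dev)
{
	/* returns immediately; sketch_fw_cb() runs later from a helper thread */
	return request_firmware_nowait(THIS_MODULE, true, "sketch_fw.bin",
				       dev, GFP_KERNEL, NULL, sketch_fw_cb);
}

static void sketch_remove(void)
{
	/* never tear down state the callback may still touch */
	wait_for_completion(&fw_loaded);
}

The completion is what lets a later unload or disconnect path wait until the asynchronous callback has either consumed or rejected the firmware; the usb.c disconnect hunk further down relies on exactly that.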
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.h b/drivers/net/wireless/rtlwifi/rtl8192se/sw.h
index fc4eb285a0ac..2eb88862ebe4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/table.c b/drivers/net/wireless/rtlwifi/rtl8192se/table.c
index 154185b3969d..f1a73f75127e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/table.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/table.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/table.h b/drivers/net/wireless/rtlwifi/rtl8192se/table.h
index b4ed6d951ebb..2feb73b71a4f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/table.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/table.h
@@ -1,5 +1,5 @@
/******************************************************************************
- * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Realtek Corporation. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index fbebe3ea0a22..2fd3d13b7ced 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -756,7 +756,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
/* DOWRD 8 */
SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, ("\n"));
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
@@ -823,8 +823,8 @@ void rtl92se_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
break;
default:
- RT_ASSERT(false, ("ERR txdesc :%d not process\n",
- desc_name));
+ RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ desc_name);
break;
}
} else {
@@ -843,8 +843,8 @@ void rtl92se_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
SET_RX_STATUS_DESC_EOR(pdesc, 1);
break;
default:
- RT_ASSERT(false, ("ERR rxdesc :%d not process\n",
- desc_name));
+ RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ desc_name);
break;
}
}
@@ -863,8 +863,8 @@ u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name)
ret = GET_TX_DESC_TX_BUFFER_ADDRESS(desc);
break;
default:
- RT_ASSERT(false, ("ERR txdesc :%d not process\n",
- desc_name));
+ RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ desc_name);
break;
}
} else {
@@ -876,8 +876,8 @@ u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name)
ret = GET_RX_STATUS_DESC_PKT_LEN(desc);
break;
default:
- RT_ASSERT(false, ("ERR rxdesc :%d not process\n",
- desc_name));
+ RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ desc_name);
break;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
index 05862c51b861..011e7b0695f2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index e956fa71d040..2e1e352864bb 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2011 Realtek Corporation. All rights reserved.
+ * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -25,16 +25,13 @@
*
*****************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/usb.h>
-#include <linux/export.h>
-#include "core.h"
#include "wifi.h"
+#include "core.h"
#include "usb.h"
#include "base.h"
#include "ps.h"
#include "rtl8192c/fw_common.h"
+#include <linux/export.h>
#define REALTEK_USB_VENQT_READ 0xC0
#define REALTEK_USB_VENQT_WRITE 0x40
@@ -276,14 +273,14 @@ static int _rtl_usb_init_tx(struct ieee80211_hw *hw)
? USB_HIGH_SPEED_BULK_SIZE
: USB_FULL_SPEED_BULK_SIZE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("USB Max Bulk-out Size=%d\n",
- rtlusb->max_bulk_out_size));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "USB Max Bulk-out Size=%d\n",
+ rtlusb->max_bulk_out_size);
for (i = 0; i < __RTL_TXQ_NUM; i++) {
u32 ep_num = rtlusb->ep_map.ep_mapping[i];
if (!ep_num) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("Invalid endpoint map setting!\n"));
+ "Invalid endpoint map setting!\n");
return -EINVAL;
}
}
@@ -345,13 +342,18 @@ static int _rtl_usb_init(struct ieee80211_hw *hw)
rtlusb->out_ep_nums++;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("USB EP(0x%02x), MaxPacketSize=%d ,Interval=%d.\n",
+ "USB EP(0x%02x), MaxPacketSize=%d, Interval=%d\n",
pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
- pep_desc->bInterval));
+ pep_desc->bInterval);
+ }
+ if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num) {
+ pr_err("Too few input end points found\n");
+ return -EINVAL;
+ }
+ if (rtlusb->out_ep_nums == 0) {
+ pr_err("No output end points found\n");
+ return -EINVAL;
}
- if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num)
- return -EINVAL ;
-
/* usb endpoint mapping */
err = rtlpriv->cfg->usb_interface_cfg->usb_endpoint_mapping(hw);
rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq;
@@ -360,7 +362,7 @@ static int _rtl_usb_init(struct ieee80211_hw *hw)
return err;
}
-static int _rtl_usb_init_sw(struct ieee80211_hw *hw)
+static void rtl_usb_init_sw(struct ieee80211_hw *hw)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -395,7 +397,6 @@ static int _rtl_usb_init_sw(struct ieee80211_hw *hw)
/* HIMR_EX - turn all on */
rtlusb->irq_mask[1] = 0xFFFFFFFF;
rtlusb->disableHWSM = true;
- return 0;
}
#define __RADIO_TAP_SIZE_RSV 32
@@ -414,7 +415,7 @@ static struct sk_buff *_rtl_prep_rx_urb(struct ieee80211_hw *hw,
gfp_mask);
if (!skb) {
RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- ("Failed to __dev_alloc_skb!!\n"))
+ "Failed to __dev_alloc_skb!!\n");
return ERR_PTR(-ENOMEM);
}
@@ -520,12 +521,14 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
u8 *pdata;
uskb = dev_alloc_skb(skb->len + 128);
- memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
- sizeof(rx_status));
- pdata = (u8 *)skb_put(uskb, skb->len);
- memcpy(pdata, skb->data, skb->len);
+ if (uskb) { /* drop packet on allocation failure */
+ memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
+ sizeof(rx_status));
+ pdata = (u8 *)skb_put(uskb, skb->len);
+ memcpy(pdata, skb->data, skb->len);
+ ieee80211_rx_irqsafe(hw, uskb);
+ }
dev_kfree_skb_any(skb);
- ieee80211_rx_irqsafe(hw, uskb);
} else {
dev_kfree_skb_any(skb);
}
@@ -575,7 +578,7 @@ static void _rtl_rx_completed(struct urb *_urb)
if (IS_ERR(_skb)) {
err = PTR_ERR(_skb);
RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- ("Can't allocate skb for bulk IN!\n"));
+ "Can't allocate skb for bulk IN!\n");
return;
}
skb = _skb;
@@ -632,14 +635,14 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw)
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- ("Failed to alloc URB!!\n"))
+ "Failed to alloc URB!!\n");
goto err_out;
}
skb = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
if (IS_ERR(skb)) {
RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- ("Failed to prep_rx_urb!!\n"))
+ "Failed to prep_rx_urb!!\n");
err = PTR_ERR(skb);
goto err_out;
}
@@ -665,15 +668,17 @@ static int rtl_usb_start(struct ieee80211_hw *hw)
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
err = rtlpriv->cfg->ops->hw_init(hw);
- rtl_init_rx_config(hw);
+ if (!err) {
+ rtl_init_rx_config(hw);
- /* Enable software */
- SET_USB_START(rtlusb);
- /* should after adapter start and interrupt enable. */
- set_hal_start(rtlhal);
+ /* Enable software */
+ SET_USB_START(rtlusb);
+ /* should after adapter start and interrupt enable. */
+ set_hal_start(rtlhal);
- /* Start bulk IN */
- _rtl_usb_receive(hw);
+ /* Start bulk IN */
+ _rtl_usb_receive(hw);
+ }
return err;
}
@@ -745,7 +750,7 @@ static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb)
struct sk_buff *skb;
RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- ("Failed to submit urb.\n"));
+ "Failed to submit urb\n");
usb_unanchor_urb(_urb);
skb = (struct sk_buff *)_urb->context;
kfree_skb(skb);
@@ -768,7 +773,7 @@ static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb,
if (urb->status) {
RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- ("Urb has error status 0x%X\n", urb->status));
+ "Urb has error status 0x%X\n", urb->status);
goto out;
}
/* TODO: statistics */
@@ -805,7 +810,7 @@ static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw,
_urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!_urb) {
RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- ("Can't allocate URB for bulk out!\n"));
+ "Can't allocate URB for bulk out!\n");
kfree_skb(skb);
return NULL;
}
@@ -830,7 +835,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl);
if (unlikely(IS_USB_STOP(rtlusb))) {
RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- ("USB device is stopping...\n"));
+ "USB device is stopping...\n");
kfree_skb(skb);
return;
}
@@ -840,7 +845,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
_urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num);
if (unlikely(!_urb)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Can't allocate urb. Drop skb!\n"));
+ "Can't allocate urb. Drop skb!\n");
return;
}
urb_list = &rtlusb->tx_pending[ep_num];
@@ -865,7 +870,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
if (ieee80211_is_auth(fc)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
rtl_ips_nic_on(hw);
}
@@ -946,10 +951,11 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
sizeof(struct rtl_usb_priv), &rtl_ops);
if (!hw) {
- RT_ASSERT(false, ("%s : ieee80211 alloc failed\n", __func__));
+ RT_ASSERT(false, "ieee80211 alloc failed\n");
return -ENOMEM;
}
rtlpriv = hw->priv;
+ init_completion(&rtlpriv->firmware_loading_complete);
SET_IEEE80211_DEV(hw, &intf->dev);
udev = interface_to_usbdev(intf);
usb_get_dev(udev);
@@ -969,39 +975,28 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
/*like read eeprom and so on */
rtlpriv->cfg->ops->read_eeprom_info(hw);
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Can't init_sw_vars.\n"));
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
goto error_out;
}
rtlpriv->cfg->ops->init_sw_leds(hw);
err = _rtl_usb_init(hw);
- err = _rtl_usb_init_sw(hw);
+ if (err)
+ goto error_out;
+ rtl_usb_init_sw(hw);
/* Init mac80211 sw */
err = rtl_init_core(hw);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- ("Can't allocate sw for mac80211.\n"));
+ "Can't allocate sw for mac80211\n");
goto error_out;
}
- /*init rfkill */
- /* rtl_init_rfkill(hw); */
-
- err = ieee80211_register_hw(hw);
- if (err) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- ("Can't register mac80211 hw.\n"));
- goto error_out;
- } else {
- rtlpriv->mac80211.mac80211_registered = 1;
- }
- set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
return 0;
error_out:
rtl_deinit_core(hw);
_rtl_usb_io_handler_release(hw);
- ieee80211_free_hw(hw);
usb_put_dev(udev);
+ complete(&rtlpriv->firmware_loading_complete);
return -ENODEV;
}
EXPORT_SYMBOL(rtl_usb_probe);
@@ -1015,6 +1010,9 @@ void rtl_usb_disconnect(struct usb_interface *intf)
if (unlikely(!rtlpriv))
return;
+
+ /* just in case driver is removed before firmware callback */
+ wait_for_completion(&rtlpriv->firmware_loading_complete);
/*ieee80211_unregister_hw will call ops_stop */
if (rtlmac->mac80211_registered == 1) {
ieee80211_unregister_hw(hw);
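Besides deferring ieee80211_register_hw() to the firmware callback, the usb.c diff above makes _rtl_usb_rx_process_noagg() tolerate a failed dev_alloc_skb(): the frame is simply dropped rather than a NULL buffer being written to and handed to mac80211. A tiny userspace C sketch of the same copy-or-drop idea (struct frame and copy_for_stack() are illustrative, not driver code):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct frame { size_t len; unsigned char data[64]; };

static unsigned char *copy_for_stack(const struct frame *rx)
{
	unsigned char *copy = malloc(rx->len);

	if (!copy)
		return NULL;		/* drop the frame, never touch a NULL buffer */
	memcpy(copy, rx->data, rx->len);
	return copy;			/* the upper layer now owns and frees it */
}

int main(void)
{
	struct frame rx = { .len = 4, .data = "abcd" };
	unsigned char *up = copy_for_stack(&rx);

	if (up) {
		printf("delivered %zu bytes\n", rx.len);
		free(up);
	}
	/* the original rx buffer is released either way, as in the driver */
	return 0;
}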
diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h
index d2a63fb3e1e6..43846b329153 100644
--- a/drivers/net/wireless/rtlwifi/usb.h
+++ b/drivers/net/wireless/rtlwifi/usb.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2011 Realtek Corporation. All rights reserved.
+ * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -28,7 +28,6 @@
#ifndef __RTL_USB_H__
#define __RTL_USB_H__
-#include <linux/usb.h>
#include <linux/skbuff.h>
#define RTL_RX_DESC_SIZE 24
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index cdaf1429fa0b..b591614c3b9b 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009-2010 Realtek Corporation.
+ * Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -30,12 +30,15 @@
#ifndef __RTL_WIFI_H__
#define __RTL_WIFI_H__
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/usb.h>
#include <net/mac80211.h>
+#include <linux/completion.h>
#include "debug.h"
#define RF_CHANGE_BY_INIT 0
@@ -1045,7 +1048,6 @@ struct rtl_hal {
u16 fw_subversion;
bool h2c_setinprogress;
u8 last_hmeboxnum;
- bool fw_ready;
/*Reserve page start offset except beacon in TxQ. */
u8 fw_rsvdpage_startoffset;
u8 h2c_txcmd_seq;
@@ -1591,6 +1593,7 @@ struct rtl_debug {
};
struct rtl_priv {
+ struct completion firmware_loading_complete;
struct rtl_locks locks;
struct rtl_works works;
struct rtl_mac mac80211;
@@ -1612,6 +1615,7 @@ struct rtl_priv {
struct rtl_rate_priv *rate_priv;
struct rtl_debug dbg;
+ int max_fw_size;
/*
*hal_cfg : for diff cards
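wifi.h now defines pr_fmt() once for the whole rtlwifi tree, which is why the per-file "#define pr_fmt(fmt) KBUILD_MODNAME ..." lines and the hard-coded "rtl8192ce:" prefixes disappear from the .c files earlier in this diff. A hedged, self-contained module-style sketch of the convention (sketch_init/sketch_exit and the license tag are illustrative):

/* must be visible before <linux/printk.h> is pulled in (module.h does that) */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

static int __init sketch_init(void)
{
	pr_info("Power Save off (module option)\n");	/* printed with the module-name prefix */
	return 0;
}

static void __exit sketch_exit(void)
{
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");

Because printk.h only installs a default pr_fmt when none is defined yet, putting the definition in a header the rtlwifi sources include first gives every pr_info()/pr_err() call site the same prefix without repeating the define — note how the usb.c hunk above reorders its includes to keep "wifi.h" at the top.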
diff --git a/drivers/net/wireless/wl1251/Makefile b/drivers/net/wireless/wl1251/Makefile
index 58b4f935a3f6..a5c6328b5f72 100644
--- a/drivers/net/wireless/wl1251/Makefile
+++ b/drivers/net/wireless/wl1251/Makefile
@@ -6,3 +6,5 @@ wl1251_sdio-objs += sdio.o
obj-$(CONFIG_WL1251) += wl1251.o
obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o
obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/wl1251/boot.c b/drivers/net/wireless/wl1251/boot.c
index d729daf8e841..a2e5241382da 100644
--- a/drivers/net/wireless/wl1251/boot.c
+++ b/drivers/net/wireless/wl1251/boot.c
@@ -464,8 +464,6 @@ static int wl1251_boot_upload_nvs(struct wl1251 *wl)
val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
| (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
- val = cpu_to_le32(val);
-
wl1251_debug(DEBUG_BOOT,
"nvs write table 0x%x: 0x%x",
nvs_start, val);
diff --git a/drivers/net/wireless/wl1251/io.h b/drivers/net/wireless/wl1251/io.h
index c545e9d5f512..d382877c34cc 100644
--- a/drivers/net/wireless/wl1251/io.h
+++ b/drivers/net/wireless/wl1251/io.h
@@ -36,16 +36,15 @@
static inline u32 wl1251_read32(struct wl1251 *wl, int addr)
{
- u32 response;
-
- wl->if_ops->read(wl, addr, &response, sizeof(u32));
+ wl->if_ops->read(wl, addr, &wl->buffer_32, sizeof(wl->buffer_32));
- return response;
+ return le32_to_cpu(wl->buffer_32);
}
static inline void wl1251_write32(struct wl1251 *wl, int addr, u32 val)
{
- wl->if_ops->write(wl, addr, &val, sizeof(u32));
+ wl->buffer_32 = cpu_to_le32(val);
+ wl->if_ops->write(wl, addr, &wl->buffer_32, sizeof(wl->buffer_32));
}
static inline u32 wl1251_read_elp(struct wl1251 *wl, int addr)
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index ba3268ea81fe..41302c7b1ad0 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -514,6 +514,9 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
struct wl1251 *wl = hw->priv;
int ret = 0;
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+
wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
vif->type, vif->addr);
@@ -1338,9 +1341,7 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_BEACON_FILTER |
- IEEE80211_HW_SUPPORTS_UAPSD |
- IEEE80211_HW_SUPPORTS_CQM_RSSI;
+ IEEE80211_HW_SUPPORTS_UAPSD;
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
index a77f1bbbed0a..9d8f5816c6f9 100644
--- a/drivers/net/wireless/wl1251/wl1251.h
+++ b/drivers/net/wireless/wl1251/wl1251.h
@@ -380,7 +380,7 @@ struct wl1251 {
struct wl1251_stats stats;
struct wl1251_debugfs debugfs;
- u32 buffer_32;
+ __le32 buffer_32;
u32 buffer_cmd;
u8 buffer_busyword[WL1251_BUSY_WORD_LEN];
struct wl1251_rx_descriptor *rx_descriptor;
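The wl1251 hunks above (Makefile, boot.c, io.h, wl1251.h) make the 32-bit register path endian-clean: the on-the-bus value lives in a dedicated __le32 bounce buffer, conversion happens only inside wl1251_read32()/wl1251_write32(), and building with -D__CHECK_ENDIAN__ lets sparse flag any place that mixes CPU-order and little-endian types. A portable userspace C sketch of the byte-order handling involved (le32_to_host/host_to_le32 are illustrative stand-ins, not the kernel's le32_to_cpu/cpu_to_le32):

#include <inttypes.h>
#include <stdio.h>

/* assemble/emit a 32-bit value in little-endian byte order, regardless of host */
static uint32_t le32_to_host(const uint8_t b[4])
{
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

static void host_to_le32(uint32_t v, uint8_t b[4])
{
	b[0] = v;
	b[1] = v >> 8;
	b[2] = v >> 16;
	b[3] = v >> 24;
}

int main(void)
{
	uint8_t wire[4];

	host_to_le32(0x12345678u, wire);		/* what a write32() would put on the bus */
	printf("0x%08" PRIx32 "\n", le32_to_host(wire));	/* read back in CPU order */
	return 0;
}

The same reasoning lies behind the boot.c hunk above that drops the stray cpu_to_le32(): the value had just been assembled byte-by-byte into CPU order, so converting it in place before handing it to the write helpers was premature.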
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index fe67262ba19f..98f289c907a9 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -11,3 +11,5 @@ obj-$(CONFIG_WL12XX_SDIO) += wl12xx_sdio.o
# small builtin driver bit
obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index 7537c401a448..bc96db0683a5 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -34,12 +34,14 @@
#include "reg.h"
#include "ps.h"
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 wake_up_event, u8 listen_interval)
{
struct acx_wake_up_condition *wake_up;
int ret;
- wl1271_debug(DEBUG_ACX, "acx wake up conditions");
+ wl1271_debug(DEBUG_ACX, "acx wake up conditions (wake_up_event %d listen_interval %d)",
+ wake_up_event, listen_interval);
wake_up = kzalloc(sizeof(*wake_up), GFP_KERNEL);
if (!wake_up) {
@@ -48,8 +50,8 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif)
}
wake_up->role_id = wlvif->role_id;
- wake_up->wake_up_event = wl->conf.conn.wake_up_event;
- wake_up->listen_interval = wl->conf.conn.listen_interval;
+ wake_up->wake_up_event = wake_up_event;
+ wake_up->listen_interval = listen_interval;
ret = wl1271_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS,
wake_up, sizeof(*wake_up));
@@ -1459,9 +1461,10 @@ out:
return ret;
}
-int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime)
+int wl12xx_acx_tsf_info(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u64 *mactime)
{
- struct wl1271_acx_fw_tsf_information *tsf_info;
+ struct wl12xx_acx_fw_tsf_information *tsf_info;
int ret;
tsf_info = kzalloc(sizeof(*tsf_info), GFP_KERNEL);
@@ -1470,6 +1473,8 @@ int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime)
goto out;
}
+ tsf_info->role_id = wlvif->role_id;
+
ret = wl1271_cmd_interrogate(wl, ACX_TSF_INFO,
tsf_info, sizeof(*tsf_info));
if (ret < 0) {
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index 69892b40c2df..a28fc044034c 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -995,15 +995,17 @@ struct wl1271_acx_ba_receiver_setup {
u8 padding[2];
} __packed;
-struct wl1271_acx_fw_tsf_information {
+struct wl12xx_acx_fw_tsf_information {
struct acx_header header;
+ u8 role_id;
+ u8 padding1[3];
__le32 current_tsf_high;
__le32 current_tsf_low;
__le32 last_bttt_high;
__le32 last_tbtt_low;
u8 last_dtim_count;
- u8 padding[3];
+ u8 padding2[3];
} __packed;
struct wl1271_acx_ps_rx_streaming {
@@ -1151,79 +1153,81 @@ struct wl12xx_acx_config_hangover {
} __packed;
enum {
- ACX_WAKE_UP_CONDITIONS = 0x0002,
- ACX_MEM_CFG = 0x0003,
- ACX_SLOT = 0x0004,
- ACX_AC_CFG = 0x0007,
- ACX_MEM_MAP = 0x0008,
- ACX_AID = 0x000A,
- ACX_MEDIUM_USAGE = 0x000F,
- ACX_TX_QUEUE_CFG = 0x0011, /* FIXME: only used by wl1251 */
- ACX_STATISTICS = 0x0013, /* Debug API */
- ACX_PWR_CONSUMPTION_STATISTICS = 0x0014,
- ACX_FEATURE_CFG = 0x0015,
- ACX_TID_CFG = 0x001A,
- ACX_PS_RX_STREAMING = 0x001B,
- ACX_BEACON_FILTER_OPT = 0x001F,
- ACX_NOISE_HIST = 0x0021,
- ACX_HDK_VERSION = 0x0022, /* ??? */
- ACX_PD_THRESHOLD = 0x0023,
- ACX_TX_CONFIG_OPT = 0x0024,
- ACX_CCA_THRESHOLD = 0x0025,
- ACX_EVENT_MBOX_MASK = 0x0026,
- ACX_CONN_MONIT_PARAMS = 0x002D,
- ACX_BCN_DTIM_OPTIONS = 0x0031,
- ACX_SG_ENABLE = 0x0032,
- ACX_SG_CFG = 0x0033,
- ACX_FM_COEX_CFG = 0x0034,
- ACX_BEACON_FILTER_TABLE = 0x0038,
- ACX_ARP_IP_FILTER = 0x0039,
- ACX_ROAMING_STATISTICS_TBL = 0x003B,
- ACX_RATE_POLICY = 0x003D,
- ACX_CTS_PROTECTION = 0x003E,
- ACX_SLEEP_AUTH = 0x003F,
- ACX_PREAMBLE_TYPE = 0x0040,
- ACX_ERROR_CNT = 0x0041,
- ACX_IBSS_FILTER = 0x0044,
- ACX_SERVICE_PERIOD_TIMEOUT = 0x0045,
- ACX_TSF_INFO = 0x0046,
- ACX_CONFIG_PS_WMM = 0x0049,
- ACX_ENABLE_RX_DATA_FILTER = 0x004A,
- ACX_SET_RX_DATA_FILTER = 0x004B,
- ACX_GET_DATA_FILTER_STATISTICS = 0x004C,
- ACX_RX_CONFIG_OPT = 0x004E,
- ACX_FRAG_CFG = 0x004F,
- ACX_BET_ENABLE = 0x0050,
- ACX_RSSI_SNR_TRIGGER = 0x0051,
- ACX_RSSI_SNR_WEIGHTS = 0x0052,
- ACX_KEEP_ALIVE_MODE = 0x0053,
- ACX_SET_KEEP_ALIVE_CONFIG = 0x0054,
- ACX_BA_SESSION_INIT_POLICY = 0x0055,
- ACX_BA_SESSION_RX_SETUP = 0x0056,
- ACX_PEER_HT_CAP = 0x0057,
- ACX_HT_BSS_OPERATION = 0x0058,
- ACX_COEX_ACTIVITY = 0x0059,
- ACX_BURST_MODE = 0x005C,
- ACX_SET_RATE_MGMT_PARAMS = 0x005D,
- ACX_SET_RATE_ADAPT_PARAMS = 0x0060,
- ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
- ACX_GEN_FW_CMD = 0x0070,
- ACX_HOST_IF_CFG_BITMAP = 0x0071,
- ACX_MAX_TX_FAILURE = 0x0072,
- ACX_UPDATE_INCONNECTION_STA_LIST = 0x0073,
- DOT11_RX_MSDU_LIFE_TIME = 0x1004,
- DOT11_CUR_TX_PWR = 0x100D,
- DOT11_RX_DOT11_MODE = 0x1012,
- DOT11_RTS_THRESHOLD = 0x1013,
- DOT11_GROUP_ADDRESS_TBL = 0x1014,
- ACX_PM_CONFIG = 0x1016,
- ACX_CONFIG_PS = 0x1017,
- ACX_CONFIG_HANGOVER = 0x1018,
+ ACX_WAKE_UP_CONDITIONS = 0x0000,
+ ACX_MEM_CFG = 0x0001,
+ ACX_SLOT = 0x0002,
+ ACX_AC_CFG = 0x0003,
+ ACX_MEM_MAP = 0x0004,
+ ACX_AID = 0x0005,
+ ACX_MEDIUM_USAGE = 0x0006,
+ ACX_STATISTICS = 0x0007,
+ ACX_PWR_CONSUMPTION_STATISTICS = 0x0008,
+ ACX_TID_CFG = 0x0009,
+ ACX_PS_RX_STREAMING = 0x000A,
+ ACX_BEACON_FILTER_OPT = 0x000B,
+ ACX_NOISE_HIST = 0x000C,
+ ACX_HDK_VERSION = 0x000D,
+ ACX_PD_THRESHOLD = 0x000E,
+ ACX_TX_CONFIG_OPT = 0x000F,
+ ACX_CCA_THRESHOLD = 0x0010,
+ ACX_EVENT_MBOX_MASK = 0x0011,
+ ACX_CONN_MONIT_PARAMS = 0x0012,
+ ACX_DISABLE_BROADCASTS = 0x0013,
+ ACX_BCN_DTIM_OPTIONS = 0x0014,
+ ACX_SG_ENABLE = 0x0015,
+ ACX_SG_CFG = 0x0016,
+ ACX_FM_COEX_CFG = 0x0017,
+ ACX_BEACON_FILTER_TABLE = 0x0018,
+ ACX_ARP_IP_FILTER = 0x0019,
+ ACX_ROAMING_STATISTICS_TBL = 0x001A,
+ ACX_RATE_POLICY = 0x001B,
+ ACX_CTS_PROTECTION = 0x001C,
+ ACX_SLEEP_AUTH = 0x001D,
+ ACX_PREAMBLE_TYPE = 0x001E,
+ ACX_ERROR_CNT = 0x001F,
+ ACX_IBSS_FILTER = 0x0020,
+ ACX_SERVICE_PERIOD_TIMEOUT = 0x0021,
+ ACX_TSF_INFO = 0x0022,
+ ACX_CONFIG_PS_WMM = 0x0023,
+ ACX_ENABLE_RX_DATA_FILTER = 0x0024,
+ ACX_SET_RX_DATA_FILTER = 0x0025,
+ ACX_GET_DATA_FILTER_STATISTICS = 0x0026,
+ ACX_RX_CONFIG_OPT = 0x0027,
+ ACX_FRAG_CFG = 0x0028,
+ ACX_BET_ENABLE = 0x0029,
+ ACX_RSSI_SNR_TRIGGER = 0x002A,
+ ACX_RSSI_SNR_WEIGHTS = 0x002B,
+ ACX_KEEP_ALIVE_MODE = 0x002C,
+ ACX_SET_KEEP_ALIVE_CONFIG = 0x002D,
+ ACX_BA_SESSION_INIT_POLICY = 0x002E,
+ ACX_BA_SESSION_RX_SETUP = 0x002F,
+ ACX_PEER_HT_CAP = 0x0030,
+ ACX_HT_BSS_OPERATION = 0x0031,
+ ACX_COEX_ACTIVITY = 0x0032,
+ ACX_BURST_MODE = 0x0033,
+ ACX_SET_RATE_MGMT_PARAMS = 0x0034,
+ ACX_GET_RATE_MGMT_PARAMS = 0x0035,
+ ACX_SET_RATE_ADAPT_PARAMS = 0x0036,
+ ACX_SET_DCO_ITRIM_PARAMS = 0x0037,
+ ACX_GEN_FW_CMD = 0x0038,
+ ACX_HOST_IF_CFG_BITMAP = 0x0039,
+ ACX_MAX_TX_FAILURE = 0x003A,
+ ACX_UPDATE_INCONNECTION_STA_LIST = 0x003B,
+ DOT11_RX_MSDU_LIFE_TIME = 0x003C,
+ DOT11_CUR_TX_PWR = 0x003D,
+ DOT11_RTS_THRESHOLD = 0x003E,
+ DOT11_GROUP_ADDRESS_TBL = 0x003F,
+ ACX_PM_CONFIG = 0x0040,
+ ACX_CONFIG_PS = 0x0041,
+ ACX_CONFIG_HANGOVER = 0x0042,
+ ACX_FEATURE_CFG = 0x0043,
+ ACX_PROTECTION_CFG = 0x0044,
};
int wl1271_acx_wake_up_conditions(struct wl1271 *wl,
- struct wl12xx_vif *wlvif);
+ struct wl12xx_vif *wlvif,
+ u8 wake_up_event, u8 listen_interval);
int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth);
int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int power);
@@ -1296,7 +1300,8 @@ int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
struct wl12xx_vif *wlvif);
int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
u16 ssn, bool enable, u8 peer_hlid);
-int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
+int wl12xx_acx_tsf_info(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u64 *mactime);
int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
bool enable);
int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif);
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index 8f9cf5a816ea..954101d03f06 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -33,65 +33,6 @@
#include "event.h"
#include "rx.h"
-static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
- [PART_DOWN] = {
- .mem = {
- .start = 0x00000000,
- .size = 0x000177c0
- },
- .reg = {
- .start = REGISTERS_BASE,
- .size = 0x00008800
- },
- .mem2 = {
- .start = 0x00000000,
- .size = 0x00000000
- },
- .mem3 = {
- .start = 0x00000000,
- .size = 0x00000000
- },
- },
-
- [PART_WORK] = {
- .mem = {
- .start = 0x00040000,
- .size = 0x00014fc0
- },
- .reg = {
- .start = REGISTERS_BASE,
- .size = 0x0000a000
- },
- .mem2 = {
- .start = 0x003004f8,
- .size = 0x00000004
- },
- .mem3 = {
- .start = 0x00040404,
- .size = 0x00000000
- },
- },
-
- [PART_DRPW] = {
- .mem = {
- .start = 0x00040000,
- .size = 0x00014fc0
- },
- .reg = {
- .start = DRPW_BASE,
- .size = 0x00006000
- },
- .mem2 = {
- .start = 0x00000000,
- .size = 0x00000000
- },
- .mem3 = {
- .start = 0x00000000,
- .size = 0x00000000
- }
- }
-};
-
static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
{
u32 cpu_ctrl;
@@ -181,13 +122,13 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
return -ENOMEM;
}
- memcpy(&partition, &part_table[PART_DOWN], sizeof(partition));
+ memcpy(&partition, &wl12xx_part_table[PART_DOWN], sizeof(partition));
partition.mem.start = dest;
wl1271_set_partition(wl, &partition);
/* 10.1 set partition limit and chunk num */
chunk_num = 0;
- partition_limit = part_table[PART_DOWN].mem.size;
+ partition_limit = wl12xx_part_table[PART_DOWN].mem.size;
while (chunk_num < fw_data_len / CHUNK_SIZE) {
/* 10.2 update partition, if needed */
@@ -195,7 +136,7 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
if (addr > partition_limit) {
addr = dest + chunk_num * CHUNK_SIZE;
partition_limit = chunk_num * CHUNK_SIZE +
- part_table[PART_DOWN].mem.size;
+ wl12xx_part_table[PART_DOWN].mem.size;
partition.mem.start = addr;
wl1271_set_partition(wl, &partition);
}
@@ -317,12 +258,12 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
}
/* update current MAC address to NVS */
- nvs_ptr[11] = wl->mac_addr[0];
- nvs_ptr[10] = wl->mac_addr[1];
- nvs_ptr[6] = wl->mac_addr[2];
- nvs_ptr[5] = wl->mac_addr[3];
- nvs_ptr[4] = wl->mac_addr[4];
- nvs_ptr[3] = wl->mac_addr[5];
+ nvs_ptr[11] = wl->addresses[0].addr[0];
+ nvs_ptr[10] = wl->addresses[0].addr[1];
+ nvs_ptr[6] = wl->addresses[0].addr[2];
+ nvs_ptr[5] = wl->addresses[0].addr[3];
+ nvs_ptr[4] = wl->addresses[0].addr[4];
+ nvs_ptr[3] = wl->addresses[0].addr[5];
/*
* Layout before the actual NVS tables:
@@ -383,7 +324,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
nvs_len -= nvs_ptr - (u8 *)wl->nvs;
/* Now we must set the partition correctly */
- wl1271_set_partition(wl, &part_table[PART_WORK]);
+ wl1271_set_partition(wl, &wl12xx_part_table[PART_WORK]);
/* Copy the NVS tables to a new block to ensure alignment */
nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
@@ -492,7 +433,7 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
wl->event_box_addr = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR);
/* set the working partition to its "running" mode offset */
- wl1271_set_partition(wl, &part_table[PART_WORK]);
+ wl1271_set_partition(wl, &wl12xx_part_table[PART_WORK]);
wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x",
wl->cmd_box_addr, wl->event_box_addr);
@@ -507,8 +448,7 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
/* unmask required mbox events */
wl->event_mask = BSS_LOSE_EVENT_ID |
SCAN_COMPLETE_EVENT_ID |
- PS_REPORT_EVENT_ID |
- DISCONNECT_EVENT_COMPLETE_ID |
+ ROLE_STOP_COMPLETE_EVENT_ID |
RSSI_SNR_TRIGGER_0_EVENT_ID |
PSPOLL_DELIVERY_FAILURE_EVENT_ID |
SOFT_GEMINI_SENSE_EVENT_ID |
@@ -547,19 +487,6 @@ static int wl1271_boot_write_irq_polarity(struct wl1271 *wl)
return 0;
}
-static void wl1271_boot_hw_version(struct wl1271 *wl)
-{
- u32 fuse;
-
- if (wl->chip.id == CHIP_ID_1283_PG20)
- fuse = wl1271_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1);
- else
- fuse = wl1271_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1);
- fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET;
-
- wl->hw_pg_ver = (s8)fuse;
-}
-
static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
{
u16 spare_reg;
@@ -698,7 +625,7 @@ static int wl127x_boot_clk(struct wl1271 *wl)
u32 pause;
u32 clk;
- if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3)
+ if (WL127X_PG_GET_MAJOR(wl->hw_pg_ver) < 3)
wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
if (wl->ref_clock == CONF_REF_CLK_19_2_E ||
@@ -753,8 +680,6 @@ int wl1271_load_firmware(struct wl1271 *wl)
u32 tmp, clk;
int selected_clock = -1;
- wl1271_boot_hw_version(wl);
-
if (wl->chip.id == CHIP_ID_1283_PG20) {
ret = wl128x_boot_clk(wl, &selected_clock);
if (ret < 0)
@@ -769,7 +694,7 @@ int wl1271_load_firmware(struct wl1271 *wl)
wl1271_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
udelay(500);
- wl1271_set_partition(wl, &part_table[PART_DRPW]);
+ wl1271_set_partition(wl, &wl12xx_part_table[PART_DRPW]);
/* Read-modify-write DRPW_SCRATCH_START register (see next state)
to be used by DRPw FW. The RTRIM value will be added by the FW
@@ -788,7 +713,7 @@ int wl1271_load_firmware(struct wl1271 *wl)
wl1271_write32(wl, DRPW_SCRATCH_START, clk);
- wl1271_set_partition(wl, &part_table[PART_WORK]);
+ wl1271_set_partition(wl, &wl12xx_part_table[PART_WORK]);
/* Disable interrupts */
wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
diff --git a/drivers/net/wireless/wl12xx/boot.h b/drivers/net/wireless/wl12xx/boot.h
index 06dad9380fa7..c3adc09f403d 100644
--- a/drivers/net/wireless/wl12xx/boot.h
+++ b/drivers/net/wireless/wl12xx/boot.h
@@ -55,16 +55,6 @@ struct wl1271_static_data {
#define OCP_REG_CLK_POLARITY 0x0cb2
#define OCP_REG_CLK_PULL 0x0cb4
-#define WL127X_REG_FUSE_DATA_2_1 0x050a
-#define WL128X_REG_FUSE_DATA_2_1 0x2152
-#define PG_VER_MASK 0x3c
-#define PG_VER_OFFSET 2
-
-#define PG_MAJOR_VER_MASK 0x3
-#define PG_MAJOR_VER_OFFSET 0x0
-#define PG_MINOR_VER_MASK 0xc
-#define PG_MINOR_VER_OFFSET 0x2
-
#define CMD_MBOX_ADDRESS 0x407B4
#define POLARITY_LOW BIT(1)
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index 25990bd38be6..3414fc11e9ba 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -459,23 +459,39 @@ out:
int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
+ unsigned long flags;
u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
if (link >= WL12XX_MAX_LINKS)
return -EBUSY;
+ /* these bits are used by op_tx */
+ spin_lock_irqsave(&wl->wl_lock, flags);
__set_bit(link, wl->links_map);
__set_bit(link, wlvif->links_map);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
*hlid = link;
return 0;
}
void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
+ unsigned long flags;
+
if (*hlid == WL12XX_INVALID_LINK_ID)
return;
+ /* these bits are used by op_tx */
+ spin_lock_irqsave(&wl->wl_lock, flags);
__clear_bit(*hlid, wl->links_map);
__clear_bit(*hlid, wlvif->links_map);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ /*
+ * At this point op_tx() will not add more packets to the queues. We
+ * can purge them.
+ */
+ wl1271_tx_reset_link_queues(wl, *hlid);
+
*hlid = WL12XX_INVALID_LINK_ID;
}
@@ -515,7 +531,7 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
goto out_free;
}
cmd->device.hlid = wlvif->dev_hlid;
- cmd->device.session = wlvif->session_counter;
+ cmd->device.session = wl12xx_get_new_session_id(wl, wlvif);
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
cmd->role_id, cmd->device.hlid, cmd->device.session);
@@ -566,7 +582,7 @@ static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl,
goto out_free;
}
- ret = wl1271_cmd_wait_for_event(wl, DISCONNECT_EVENT_COMPLETE_ID);
+ ret = wl1271_cmd_wait_for_event(wl, ROLE_STOP_COMPLETE_EVENT_ID);
if (ret < 0) {
wl1271_error("cmd role stop dev event completion error");
goto out_free;
@@ -715,6 +731,8 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->ap.dtim_interval = bss_conf->dtim_period;
cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
+ /* FIXME: Change when adding DFS */
+ cmd->ap.reset_tsf = 1; /* By default reset AP TSF */
cmd->channel = wlvif->channel;
if (!bss_conf->hidden_ssid) {
@@ -994,7 +1012,7 @@ out:
}
int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- u8 ps_mode)
+ u8 ps_mode, u16 auto_ps_timeout)
{
struct wl1271_cmd_ps_params *ps_params = NULL;
int ret = 0;
@@ -1009,6 +1027,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ps_params->role_id = wlvif->role_id;
ps_params->ps_mode = ps_mode;
+ ps_params->auto_ps_timeout = auto_ps_timeout;
ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
sizeof(*ps_params), 0);
@@ -1022,13 +1041,15 @@ out:
return ret;
}
-int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
- void *buf, size_t buf_len, int index, u32 rates)
+int wl1271_cmd_template_set(struct wl1271 *wl, u8 role_id,
+ u16 template_id, void *buf, size_t buf_len,
+ int index, u32 rates)
{
struct wl1271_cmd_template_set *cmd;
int ret = 0;
- wl1271_debug(DEBUG_CMD, "cmd template_set %d", template_id);
+ wl1271_debug(DEBUG_CMD, "cmd template_set %d (role %d)",
+ template_id, role_id);
WARN_ON(buf_len > WL1271_CMD_TEMPL_MAX_SIZE);
buf_len = min_t(size_t, buf_len, WL1271_CMD_TEMPL_MAX_SIZE);
@@ -1039,6 +1060,8 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
goto out;
}
+ /* during initialization wlvif is NULL */
+ cmd->role_id = role_id;
cmd->len = cpu_to_le16(buf_len);
cmd->template_type = template_id;
cmd->enabled_rates = cpu_to_le32(rates);
@@ -1082,7 +1105,8 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
ptr = skb->data;
}
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0,
+ ret = wl1271_cmd_template_set(wl, wlvif->role_id,
+ CMD_TEMPL_NULL_DATA, ptr, size, 0,
wlvif->basic_rate);
out:
@@ -1105,7 +1129,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
if (!skb)
goto out;
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV,
+ ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_KLV,
skb->data, skb->len,
CMD_TEMPL_KLV_IDX_NULL_DATA,
wlvif->basic_rate);
@@ -1130,7 +1154,8 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (!skb)
goto out;
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data,
+ ret = wl1271_cmd_template_set(wl, wlvif->role_id,
+ CMD_TEMPL_PS_POLL, skb->data,
skb->len, 0, wlvif->basic_rate_set);
out:
@@ -1138,9 +1163,10 @@ out:
return ret;
}
-int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 role_id, u8 band,
const u8 *ssid, size_t ssid_len,
- const u8 *ie, size_t ie_len, u8 band)
+ const u8 *ie, size_t ie_len)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
@@ -1158,10 +1184,12 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
if (band == IEEE80211_BAND_2GHZ)
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
+ ret = wl1271_cmd_template_set(wl, role_id,
+ CMD_TEMPL_CFG_PROBE_REQ_2_4,
skb->data, skb->len, 0, rate);
else
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
+ ret = wl1271_cmd_template_set(wl, role_id,
+ CMD_TEMPL_CFG_PROBE_REQ_5,
skb->data, skb->len, 0, rate);
out:
@@ -1186,10 +1214,12 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
if (wlvif->band == IEEE80211_BAND_2GHZ)
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
+ ret = wl1271_cmd_template_set(wl, wlvif->role_id,
+ CMD_TEMPL_CFG_PROBE_REQ_2_4,
skb->data, skb->len, 0, rate);
else
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
+ ret = wl1271_cmd_template_set(wl, wlvif->role_id,
+ CMD_TEMPL_CFG_PROBE_REQ_5,
skb->data, skb->len, 0, rate);
if (ret < 0)
@@ -1199,32 +1229,34 @@ out:
return skb;
}
-int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- __be32 ip_addr)
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
- int ret;
+ int ret, extra;
+ u16 fc;
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
- struct wl12xx_arp_rsp_template tmpl;
+ struct sk_buff *skb;
+ struct wl12xx_arp_rsp_template *tmpl;
struct ieee80211_hdr_3addr *hdr;
struct arphdr *arp_hdr;
- memset(&tmpl, 0, sizeof(tmpl));
+ skb = dev_alloc_skb(sizeof(*hdr) + sizeof(__le16) + sizeof(*tmpl) +
+ WL1271_EXTRA_SPACE_MAX);
+ if (!skb) {
+ wl1271_error("failed to allocate buffer for arp rsp template");
+ return -ENOMEM;
+ }
- /* mac80211 header */
- hdr = &tmpl.hdr;
- hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_DATA |
- IEEE80211_FCTL_TODS);
- memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
- memcpy(hdr->addr2, vif->addr, ETH_ALEN);
- memset(hdr->addr3, 0xff, ETH_ALEN);
+ skb_reserve(skb, sizeof(*hdr) + WL1271_EXTRA_SPACE_MAX);
+
+ tmpl = (struct wl12xx_arp_rsp_template *)skb_put(skb, sizeof(*tmpl));
+ memset(tmpl, 0, sizeof(tmpl));
/* llc layer */
- memcpy(tmpl.llc_hdr, rfc1042_header, sizeof(rfc1042_header));
- tmpl.llc_type = cpu_to_be16(ETH_P_ARP);
+ memcpy(tmpl->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
+ tmpl->llc_type = cpu_to_be16(ETH_P_ARP);
/* arp header */
- arp_hdr = &tmpl.arp_hdr;
+ arp_hdr = &tmpl->arp_hdr;
arp_hdr->ar_hrd = cpu_to_be16(ARPHRD_ETHER);
arp_hdr->ar_pro = cpu_to_be16(ETH_P_IP);
arp_hdr->ar_hln = ETH_ALEN;
@@ -1232,13 +1264,59 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
/* arp payload */
- memcpy(tmpl.sender_hw, vif->addr, ETH_ALEN);
- tmpl.sender_ip = ip_addr;
+ memcpy(tmpl->sender_hw, vif->addr, ETH_ALEN);
+ tmpl->sender_ip = wlvif->ip_addr;
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP,
- &tmpl, sizeof(tmpl), 0,
- wlvif->basic_rate);
+ /* encryption space */
+ switch (wlvif->encryption_type) {
+ case KEY_TKIP:
+ extra = WL1271_EXTRA_SPACE_TKIP;
+ break;
+ case KEY_AES:
+ extra = WL1271_EXTRA_SPACE_AES;
+ break;
+ case KEY_NONE:
+ case KEY_WEP:
+ case KEY_GEM:
+ extra = 0;
+ break;
+ default:
+ wl1271_warning("Unknown encryption type: %d",
+ wlvif->encryption_type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (extra) {
+ u8 *space = skb_push(skb, extra);
+ memset(space, 0, extra);
+ }
+
+ /* QoS header - BE */
+ if (wlvif->sta.qos)
+ memset(skb_push(skb, sizeof(__le16)), 0, sizeof(__le16));
+ /* mac80211 header */
+ hdr = (struct ieee80211_hdr_3addr *)skb_push(skb, sizeof(*hdr));
+ memset(hdr, 0, sizeof(hdr));
+ fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS;
+ if (wlvif->sta.qos)
+ fc |= IEEE80211_STYPE_QOS_DATA;
+ else
+ fc |= IEEE80211_STYPE_DATA;
+ if (wlvif->encryption_type != KEY_NONE)
+ fc |= IEEE80211_FCTL_PROTECTED;
+
+ hdr->frame_control = cpu_to_le16(fc);
+ memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
+ memcpy(hdr->addr2, vif->addr, ETH_ALEN);
+ memset(hdr->addr3, 0xff, ETH_ALEN);
+
+ ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_ARP_RSP,
+ skb->data, skb->len, 0,
+ wlvif->basic_rate);
+out:
+ dev_kfree_skb(skb);
return ret;
}
@@ -1260,7 +1338,8 @@ int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif)
/* FIXME: not sure what priority to use here */
template.qos_ctrl = cpu_to_le16(0);
- return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template,
+ return wl1271_cmd_template_set(wl, wlvif->role_id,
+ CMD_TEMPL_QOS_NULL_DATA, &template,
sizeof(template), 0,
wlvif->basic_rate);
}
@@ -1739,11 +1818,20 @@ int wl12xx_croc(struct wl1271 *wl, u8 role_id)
goto out;
__clear_bit(role_id, wl->roc_map);
+
+ /*
+ * Rearm the tx watchdog when removing the last ROC. This prevents
+ * recoveries due to just finished ROCs - when Tx hasn't yet had
+ * a chance to get out.
+ */
+ if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES)
+ wl12xx_rearm_tx_watchdog_locked(wl);
out:
return ret;
}
int wl12xx_cmd_channel_switch(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct ieee80211_channel_switch *ch_switch)
{
struct wl12xx_cmd_channel_switch *cmd;
@@ -1757,10 +1845,13 @@ int wl12xx_cmd_channel_switch(struct wl1271 *wl,
goto out;
}
+ cmd->role_id = wlvif->role_id;
cmd->channel = ch_switch->channel->hw_value;
cmd->switch_time = ch_switch->count;
- cmd->tx_suspend = ch_switch->block_tx;
- cmd->flush = 0; /* this value is ignored by the FW */
+ cmd->stop_tx = ch_switch->block_tx;
+
+ /* FIXME: control from mac80211 in the future */
+ cmd->post_switch_tx_disable = 0; /* Enable TX on the target channel */
ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0);
if (ret < 0) {
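In the cmd.c hunks above, wl12xx_allocate_link() and wl12xx_free_link() now take the driver spinlock around the non-atomic __set_bit()/__clear_bit() updates because, as the new comments say, op_tx() uses the same bitmaps and can run concurrently; freeing a link also purges its queued packets afterwards. A hedged kernel-style sketch of that locked-bitmap allocation (SKETCH_MAX_LINKS, sketch_lock and sketch_map are illustrative names; builds as module code, not standalone):

#include <linux/bitmap.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

#define SKETCH_MAX_LINKS 12

static DEFINE_SPINLOCK(sketch_lock);
static DECLARE_BITMAP(sketch_map, SKETCH_MAX_LINKS);

/* callers are assumed to be serialized by a higher-level mutex, as in wl12xx;
 * the spinlock only synchronizes with the TX path that reads the map */
static int sketch_alloc_link(u8 *hlid)
{
	unsigned long flags;
	u8 link = find_first_zero_bit(sketch_map, SKETCH_MAX_LINKS);

	if (link >= SKETCH_MAX_LINKS)
		return -EBUSY;

	/* publish the bit under the lock so the TX path sees a consistent map */
	spin_lock_irqsave(&sketch_lock, flags);
	__set_bit(link, sketch_map);
	spin_unlock_irqrestore(&sketch_lock, flags);

	*hlid = link;
	return 0;
}

static void sketch_free_link(u8 hlid)
{
	unsigned long flags;

	spin_lock_irqsave(&sketch_lock, flags);
	__clear_bit(hlid, sketch_map);
	spin_unlock_irqrestore(&sketch_lock, flags);
	/* once the bit is clear, no new packets are queued for this link and
	 * whatever is still pending can be flushed safely */
}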
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index 3f7d0b93c24d..de217d92516b 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -51,22 +51,23 @@ int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- u8 ps_mode);
+ u8 ps_mode, u16 auto_ps_timeout);
int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
size_t len);
-int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
- void *buf, size_t buf_len, int index, u32 rates);
+int wl1271_cmd_template_set(struct wl1271 *wl, u8 role_id,
+ u16 template_id, void *buf, size_t buf_len,
+ int index, u32 rates);
int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 aid);
-int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 role_id, u8 band,
const u8 *ssid, size_t ssid_len,
- const u8 *ie, size_t ie_len, u8 band);
+ const u8 *ie, size_t ie_len);
struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct sk_buff *skb);
-int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- __be32 ip_addr);
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif);
int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
struct wl12xx_vif *wlvif);
@@ -89,6 +90,7 @@ int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
int wl12xx_cmd_channel_switch(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct ieee80211_channel_switch *ch_switch);
int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl);
int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
@@ -96,62 +98,65 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid);
enum wl1271_commands {
- CMD_INTERROGATE = 1, /*use this to read information elements*/
- CMD_CONFIGURE = 2, /*use this to write information elements*/
- CMD_ENABLE_RX = 3,
- CMD_ENABLE_TX = 4,
- CMD_DISABLE_RX = 5,
- CMD_DISABLE_TX = 6,
- CMD_SCAN = 8,
- CMD_STOP_SCAN = 9,
- CMD_SET_KEYS = 12,
- CMD_READ_MEMORY = 13,
- CMD_WRITE_MEMORY = 14,
- CMD_SET_TEMPLATE = 19,
- CMD_TEST = 23,
- CMD_NOISE_HIST = 28,
- CMD_QUIET_ELEMENT_SET_STATE = 29,
- CMD_SET_BCN_MODE = 33,
- CMD_MEASUREMENT = 34,
- CMD_STOP_MEASUREMENT = 35,
- CMD_SET_PS_MODE = 37,
- CMD_CHANNEL_SWITCH = 38,
- CMD_STOP_CHANNEL_SWICTH = 39,
- CMD_AP_DISCOVERY = 40,
- CMD_STOP_AP_DISCOVERY = 41,
- CMD_HEALTH_CHECK = 45,
- CMD_DEBUG = 46,
- CMD_TRIGGER_SCAN_TO = 47,
- CMD_CONNECTION_SCAN_CFG = 48,
- CMD_CONNECTION_SCAN_SSID_CFG = 49,
- CMD_START_PERIODIC_SCAN = 50,
- CMD_STOP_PERIODIC_SCAN = 51,
- CMD_SET_PEER_STATE = 52,
- CMD_REMAIN_ON_CHANNEL = 53,
- CMD_CANCEL_REMAIN_ON_CHANNEL = 54,
-
- CMD_CONFIG_FWLOGGER = 55,
- CMD_START_FWLOGGER = 56,
- CMD_STOP_FWLOGGER = 57,
-
- /* AP commands */
- CMD_ADD_PEER = 62,
- CMD_REMOVE_PEER = 63,
+ CMD_INTERROGATE = 1, /* use this to read information elements */
+ CMD_CONFIGURE = 2, /* use this to write information elements */
+ CMD_ENABLE_RX = 3,
+ CMD_ENABLE_TX = 4,
+ CMD_DISABLE_RX = 5,
+ CMD_DISABLE_TX = 6,
+ CMD_SCAN = 7,
+ CMD_STOP_SCAN = 8,
+ CMD_SET_KEYS = 9,
+ CMD_READ_MEMORY = 10,
+ CMD_WRITE_MEMORY = 11,
+ CMD_SET_TEMPLATE = 12,
+ CMD_TEST = 13,
+ CMD_NOISE_HIST = 14,
+ CMD_QUIET_ELEMENT_SET_STATE = 15,
+ CMD_SET_BCN_MODE = 16,
+
+ CMD_MEASUREMENT = 17,
+ CMD_STOP_MEASUREMENT = 18,
+ CMD_SET_PS_MODE = 19,
+ CMD_CHANNEL_SWITCH = 20,
+ CMD_STOP_CHANNEL_SWICTH = 21,
+ CMD_AP_DISCOVERY = 22,
+ CMD_STOP_AP_DISCOVERY = 23,
+ CMD_HEALTH_CHECK = 24,
+ CMD_DEBUG = 25,
+ CMD_TRIGGER_SCAN_TO = 26,
+ CMD_CONNECTION_SCAN_CFG = 27,
+ CMD_CONNECTION_SCAN_SSID_CFG = 28,
+ CMD_START_PERIODIC_SCAN = 29,
+ CMD_STOP_PERIODIC_SCAN = 30,
+ CMD_SET_PEER_STATE = 31,
+ CMD_REMAIN_ON_CHANNEL = 32,
+ CMD_CANCEL_REMAIN_ON_CHANNEL = 33,
+ CMD_CONFIG_FWLOGGER = 34,
+ CMD_START_FWLOGGER = 35,
+ CMD_STOP_FWLOGGER = 36,
+
+ /* Access point commands */
+ CMD_ADD_PEER = 37,
+ CMD_REMOVE_PEER = 38,
/* Role API */
- CMD_ROLE_ENABLE = 70,
- CMD_ROLE_DISABLE = 71,
- CMD_ROLE_START = 72,
- CMD_ROLE_STOP = 73,
+ CMD_ROLE_ENABLE = 39,
+ CMD_ROLE_DISABLE = 40,
+ CMD_ROLE_START = 41,
+ CMD_ROLE_STOP = 42,
- /* WIFI Direct */
- CMD_WFD_START_DISCOVERY = 80,
- CMD_WFD_STOP_DISCOVERY = 81,
- CMD_WFD_ATTRIBUTE_CONFIG = 82,
+ /* DFS */
+ CMD_START_RADAR_DETECTION = 43,
+ CMD_STOP_RADAR_DETECTION = 44,
- CMD_NOP = 100,
+ /* WIFI Direct */
+ CMD_WFD_START_DISCOVERY = 45,
+ CMD_WFD_STOP_DISCOVERY = 46,
+ CMD_WFD_ATTRIBUTE_CONFIG = 47,
+ CMD_NOP = 48,
+ CMD_LAST_COMMAND,
- NUM_COMMANDS,
MAX_COMMAND_ID = 0xFFFF,
};
@@ -191,7 +196,7 @@ enum cmd_templ {
/* unit ms */
#define WL1271_COMMAND_TIMEOUT 2000
#define WL1271_CMD_TEMPL_DFLT_SIZE 252
-#define WL1271_CMD_TEMPL_MAX_SIZE 548
+#define WL1271_CMD_TEMPL_MAX_SIZE 512
#define WL1271_EVENT_TIMEOUT 750
struct wl1271_cmd_header {
@@ -339,7 +344,9 @@ struct wl12xx_cmd_role_start {
u8 ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 padding_1[5];
+ u8 reset_tsf;
+
+ u8 padding_1[4];
} __packed ap;
};
} __packed;
@@ -364,14 +371,18 @@ struct cmd_enabledisable_path {
struct wl1271_cmd_template_set {
struct wl1271_cmd_header header;
- __le16 len;
+ u8 role_id;
u8 template_type;
+ __le16 len;
u8 index; /* relevant only for KLV_TEMPLATE type */
+ u8 padding[3];
+
__le32 enabled_rates;
u8 short_retry_limit;
u8 long_retry_limit;
u8 aflags;
u8 reserved;
+
u8 template_data[WL1271_CMD_TEMPL_MAX_SIZE];
} __packed;
@@ -388,6 +399,7 @@ struct wl1271_tim {
} __packed;
enum wl1271_cmd_ps_mode {
+ STATION_AUTO_PS_MODE, /* Dynamic Power Save */
STATION_ACTIVE_MODE,
STATION_POWER_SAVE_MODE
};
@@ -397,7 +409,7 @@ struct wl1271_cmd_ps_params {
u8 role_id;
u8 ps_mode; /* STATION_* */
- u8 padding[2];
+ u16 auto_ps_timeout;
} __packed;
/* HW encryption keys */
@@ -695,14 +707,18 @@ struct wl12xx_cmd_stop_fwlog {
struct wl12xx_cmd_channel_switch {
struct wl1271_cmd_header header;
+ u8 role_id;
+
/* The new serving channel */
u8 channel;
/* Relative time of the serving channel switch in TBTT units */
u8 switch_time;
- /* 1: Suspend TX till switch time; 0: Do not suspend TX */
- u8 tx_suspend;
- /* 1: Flush TX at switch time; 0: Do not flush */
- u8 flush;
+ /* Stop the role TX, should expect it after radar detection */
+ u8 stop_tx;
+ /* The target channel tx status 1-stopped 0-open */
+ u8 post_switch_tx_disable;
+
+ u8 padding[3];
} __packed;
struct wl12xx_cmd_stop_channel_switch {
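
Note on the cmd.h changes above: CMD_SET_PS_MODE now carries a role id and an automatic-PS timeout where padding used to be. A minimal sketch of how such a command might be filled, assuming the driver's existing wl1271_cmd_send() helper and the dynamic_ps_timeout conf field added further down; the function name here is invented for illustration and is not part of the patch:

	/* illustrative only: request auto PS for a single role */
	static int wl12xx_sketch_set_auto_ps(struct wl1271 *wl,
					     struct wl12xx_vif *wlvif)
	{
		struct wl1271_cmd_ps_params *ps;
		int ret;

		ps = kzalloc(sizeof(*ps), GFP_KERNEL);
		if (!ps)
			return -ENOMEM;

		ps->role_id = wlvif->role_id;        /* PS is now per role */
		ps->ps_mode = STATION_AUTO_PS_MODE;  /* new dynamic PS mode */
		ps->auto_ps_timeout = wl->conf.conn.dynamic_ps_timeout;

		ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps, sizeof(*ps), 0);
		kfree(ps);
		return ret;
	}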
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index 1bcfb017058d..3e581e19424c 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -66,7 +66,8 @@ enum {
};
enum {
- CONF_HW_RXTX_RATE_MCS7 = 0,
+ CONF_HW_RXTX_RATE_MCS7_SGI = 0,
+ CONF_HW_RXTX_RATE_MCS7,
CONF_HW_RXTX_RATE_MCS6,
CONF_HW_RXTX_RATE_MCS5,
CONF_HW_RXTX_RATE_MCS4,
@@ -91,6 +92,10 @@ enum {
CONF_HW_RXTX_RATE_UNSUPPORTED = 0xff
};
+/* Rates between and including these are MCS rates */
+#define CONF_HW_RXTX_RATE_MCS_MIN CONF_HW_RXTX_RATE_MCS7_SGI
+#define CONF_HW_RXTX_RATE_MCS_MAX CONF_HW_RXTX_RATE_MCS0
+
enum {
CONF_SG_DISABLE = 0,
CONF_SG_PROTECTIVE,
@@ -312,6 +317,10 @@ enum {
CONF_AP_BT_ACL_VAL_BT_SERVE_TIME,
CONF_AP_BT_ACL_VAL_WL_SERVE_TIME,
+ /* CTS Diluting params */
+ CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH,
+ CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER,
+
CONF_SG_TEMP_PARAM_1,
CONF_SG_TEMP_PARAM_2,
CONF_SG_TEMP_PARAM_3,
@@ -681,6 +690,9 @@ struct conf_tx_settings {
*/
u8 tmpl_short_retry_limit;
u8 tmpl_long_retry_limit;
+
+ /* Time in ms for Tx watchdog timer to expire */
+ u32 tx_watchdog_timeout;
};
enum {
@@ -810,6 +822,19 @@ struct conf_conn_settings {
u8 listen_interval;
/*
+ * Firmware wakeup conditions during suspend
+ * Range: CONF_WAKE_UP_EVENT_*
+ */
+ u8 suspend_wake_up_event;
+
+ /*
+ * Listen interval during suspend.
+ * Currently will be in DTIMs (1-10)
+ *
+ */
+ u8 suspend_listen_interval;
+
+ /*
* Enable or disable the beacon filtering.
*
* Range: CONF_BCN_FILT_MODE_*
@@ -868,13 +893,6 @@ struct conf_conn_settings {
u8 ps_poll_threshold;
/*
- * PS Poll failure recovery ACTIVE period length
- *
- * Range: u32 (ms)
- */
- u32 ps_poll_recovery_period;
-
- /*
* Configuration of signal average weights.
*/
struct conf_sig_weights sig_weights;
@@ -922,6 +940,18 @@ struct conf_conn_settings {
u8 psm_entry_nullfunc_retries;
/*
+ * Specifies the dynamic PS timeout in ms that will be used
+ * by the FW when in AUTO_PS mode
+ */
+ u16 dynamic_ps_timeout;
+
+ /*
+ * Specifies whether dynamic PS should be disabled and PSM forced.
+ * This is required for certain WiFi certification tests.
+ */
+ u8 forced_ps;
+
+ /*
*
* Specifies the interval of the connection keep-alive null-func
* frame in ms.
@@ -1055,6 +1085,14 @@ struct conf_scan_settings {
*/
u16 num_probe_reqs;
+ /*
+ * Scan trigger (split scan) timeout. The FW will split the scan
+ * operation into slices of the given time and allow the FW to schedule
+ * other tasks in between.
+ *
+ * Range: u32 Microsecs
+ */
+ u32 split_scan_timeout;
};
struct conf_sched_scan_settings {
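
Note on the conf.h changes above: the new CONF_HW_RXTX_RATE_MCS_MIN/MAX bounds allow classifying a firmware rate index without enumerating every MCS entry, since the MCS rates sit at the low end of the enum. A small sketch of the kind of helper they enable; the function name is made up for illustration:

	/* illustrative helper: true if a fw rate index is an MCS (11n) rate */
	static bool wl12xx_rate_index_is_mcs(u8 rate_idx)
	{
		return rate_idx >= CONF_HW_RXTX_RATE_MCS_MIN &&
		       rate_idx <= CONF_HW_RXTX_RATE_MCS_MAX;
	}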
diff --git a/drivers/net/wireless/wl12xx/debug.h b/drivers/net/wireless/wl12xx/debug.h
index b85fd8c41e8f..ec0fdc25b280 100644
--- a/drivers/net/wireless/wl12xx/debug.h
+++ b/drivers/net/wireless/wl12xx/debug.h
@@ -51,6 +51,7 @@ enum {
DEBUG_FILTERS = BIT(15),
DEBUG_ADHOC = BIT(16),
DEBUG_AP = BIT(17),
+ DEBUG_PROBE = BIT(18),
DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
DEBUG_ALL = ~0,
};
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index 15eb3a9c30ca..e1cf72765965 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -113,7 +113,7 @@ static void wl1271_debugfs_update_stats(struct wl1271 *wl)
if (ret < 0)
goto out;
- if (wl->state == WL1271_STATE_ON &&
+ if (wl->state == WL1271_STATE_ON && !wl->plt &&
time_after(jiffies, wl->stats.fw_stats_update +
msecs_to_jiffies(WL1271_DEBUGFS_STATS_LIFETIME))) {
wl1271_acx_statistics(wl, wl->stats.fw_stats);
@@ -312,6 +312,181 @@ static const struct file_operations start_recovery_ops = {
.llseek = default_llseek,
};
+static ssize_t dynamic_ps_timeout_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+
+ return wl1271_format_buffer(user_buf, count,
+ ppos, "%d\n",
+ wl->conf.conn.dynamic_ps_timeout);
+}
+
+static ssize_t dynamic_ps_timeout_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 10, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal value in dynamic_ps");
+ return -EINVAL;
+ }
+
+ if (value < 1 || value > 65535) {
+ wl1271_warning("dyanmic_ps_timeout is not in valid range");
+ return -ERANGE;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ wl->conf.conn.dynamic_ps_timeout = value;
+
+ if (wl->state == WL1271_STATE_OFF)
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ /* In case we're already in PSM, trigger it again to set new timeout
+ * immediately without waiting for re-association
+ */
+
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ if (test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags))
+ wl1271_ps_set_mode(wl, wlvif, STATION_AUTO_PS_MODE);
+ }
+
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static const struct file_operations dynamic_ps_timeout_ops = {
+ .read = dynamic_ps_timeout_read,
+ .write = dynamic_ps_timeout_write,
+ .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static ssize_t forced_ps_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+
+ return wl1271_format_buffer(user_buf, count,
+ ppos, "%d\n",
+ wl->conf.conn.forced_ps);
+}
+
+static ssize_t forced_ps_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
+ unsigned long value;
+ int ret, ps_mode;
+
+ ret = kstrtoul_from_user(user_buf, count, 10, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal value in forced_ps");
+ return -EINVAL;
+ }
+
+ if (value != 1 && value != 0) {
+ wl1271_warning("forced_ps should be either 0 or 1");
+ return -ERANGE;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ if (wl->conf.conn.forced_ps == value)
+ goto out;
+
+ wl->conf.conn.forced_ps = value;
+
+ if (wl->state == WL1271_STATE_OFF)
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ /* In case we're already in PSM, trigger it again to switch mode
+ * immediately without waiting for re-association
+ */
+
+ ps_mode = value ? STATION_POWER_SAVE_MODE : STATION_AUTO_PS_MODE;
+
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ if (test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags))
+ wl1271_ps_set_mode(wl, wlvif, ps_mode);
+ }
+
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static const struct file_operations forced_ps_ops = {
+ .read = forced_ps_read,
+ .write = forced_ps_write,
+ .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static ssize_t split_scan_timeout_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+
+ return wl1271_format_buffer(user_buf, count,
+ ppos, "%d\n",
+ wl->conf.scan.split_scan_timeout / 1000);
+}
+
+static ssize_t split_scan_timeout_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 10, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal value in split_scan_timeout");
+ return -EINVAL;
+ }
+
+ if (value == 0)
+ wl1271_info("split scan will be disabled");
+
+ mutex_lock(&wl->mutex);
+
+ wl->conf.scan.split_scan_timeout = value * 1000;
+
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static const struct file_operations split_scan_timeout_ops = {
+ .read = split_scan_timeout_read,
+ .write = split_scan_timeout_write,
+ .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
+};
+
static ssize_t driver_state_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -446,6 +621,7 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
VIF_STATE_PRINT_INT(sta.basic_rate_idx);
VIF_STATE_PRINT_INT(sta.ap_rate_idx);
VIF_STATE_PRINT_INT(sta.p2p_rate_idx);
+ VIF_STATE_PRINT_INT(sta.qos);
} else {
VIF_STATE_PRINT_INT(ap.global_hlid);
VIF_STATE_PRINT_INT(ap.bcast_hlid);
@@ -471,7 +647,6 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
VIF_STATE_PRINT_INT(default_key);
VIF_STATE_PRINT_INT(aid);
VIF_STATE_PRINT_INT(session_counter);
- VIF_STATE_PRINT_INT(ps_poll_failures);
VIF_STATE_PRINT_INT(psm_entry_retry);
VIF_STATE_PRINT_INT(power_level);
VIF_STATE_PRINT_INT(rssi_thold);
@@ -562,6 +737,64 @@ static const struct file_operations dtim_interval_ops = {
.llseek = default_llseek,
};
+
+
+static ssize_t suspend_dtim_interval_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ u8 value;
+
+ if (wl->conf.conn.suspend_wake_up_event == CONF_WAKE_UP_EVENT_DTIM ||
+ wl->conf.conn.suspend_wake_up_event == CONF_WAKE_UP_EVENT_N_DTIM)
+ value = wl->conf.conn.suspend_listen_interval;
+ else
+ value = 0;
+
+ return wl1271_format_buffer(user_buf, count, ppos, "%d\n", value);
+}
+
+static ssize_t suspend_dtim_interval_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 10, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal value for suspend_dtim_interval");
+ return -EINVAL;
+ }
+
+ if (value < 1 || value > 10) {
+ wl1271_warning("suspend_dtim value is not in valid range");
+ return -ERANGE;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ wl->conf.conn.suspend_listen_interval = value;
+ /* for some reason there are different event types for 1 and >1 */
+ if (value == 1)
+ wl->conf.conn.suspend_wake_up_event = CONF_WAKE_UP_EVENT_DTIM;
+ else
+ wl->conf.conn.suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM;
+
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+
+static const struct file_operations suspend_dtim_interval_ops = {
+ .read = suspend_dtim_interval_read,
+ .write = suspend_dtim_interval_write,
+ .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
+};
+
static ssize_t beacon_interval_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -886,8 +1119,12 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD(driver_state, rootdir);
DEBUGFS_ADD(vifs_state, rootdir);
DEBUGFS_ADD(dtim_interval, rootdir);
+ DEBUGFS_ADD(suspend_dtim_interval, rootdir);
DEBUGFS_ADD(beacon_interval, rootdir);
DEBUGFS_ADD(beacon_filtering, rootdir);
+ DEBUGFS_ADD(dynamic_ps_timeout, rootdir);
+ DEBUGFS_ADD(forced_ps, rootdir);
+ DEBUGFS_ADD(split_scan_timeout, rootdir);
streaming = debugfs_create_dir("rx_streaming", rootdir);
if (!streaming || IS_ERR(streaming))
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index d3280df68f5d..c953717f38eb 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -30,133 +30,6 @@
#include "scan.h"
#include "wl12xx_80211.h"
-void wl1271_pspoll_work(struct work_struct *work)
-{
- struct ieee80211_vif *vif;
- struct wl12xx_vif *wlvif;
- struct delayed_work *dwork;
- struct wl1271 *wl;
- int ret;
-
- dwork = container_of(work, struct delayed_work, work);
- wlvif = container_of(dwork, struct wl12xx_vif, pspoll_work);
- vif = container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
- wl = wlvif->wl;
-
- wl1271_debug(DEBUG_EVENT, "pspoll work");
-
- mutex_lock(&wl->mutex);
-
- if (unlikely(wl->state == WL1271_STATE_OFF))
- goto out;
-
- if (!test_and_clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags))
- goto out;
-
- if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
- goto out;
-
- /*
- * if we end up here, then we were in powersave when the pspoll
- * delivery failure occurred, and no-one changed state since, so
- * we should go back to powersave.
- */
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
-
- wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
- wlvif->basic_rate, true);
-
- wl1271_ps_elp_sleep(wl);
-out:
- mutex_unlock(&wl->mutex);
-};
-
-static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl,
- struct wl12xx_vif *wlvif)
-{
- int delay = wl->conf.conn.ps_poll_recovery_period;
- int ret;
-
- wlvif->ps_poll_failures++;
- if (wlvif->ps_poll_failures == 1)
- wl1271_info("AP with dysfunctional ps-poll, "
- "trying to work around it.");
-
- /* force active mode receive data from the AP */
- if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
- ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
- wlvif->basic_rate, true);
- if (ret < 0)
- return;
- set_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
- ieee80211_queue_delayed_work(wl->hw, &wlvif->pspoll_work,
- msecs_to_jiffies(delay));
- }
-
- /*
- * If already in active mode, lets we should be getting data from
- * the AP right away. If we enter PSM too fast after this, and data
- * remains on the AP, we will get another event like this, and we'll
- * go into active once more.
- */
-}
-
-static int wl1271_event_ps_report(struct wl1271 *wl,
- struct wl12xx_vif *wlvif,
- struct event_mailbox *mbox,
- bool *beacon_loss)
-{
- int ret = 0;
- u32 total_retries = wl->conf.conn.psm_entry_retries;
-
- wl1271_debug(DEBUG_EVENT, "ps_status: 0x%x", mbox->ps_status);
-
- switch (mbox->ps_status) {
- case EVENT_ENTER_POWER_SAVE_FAIL:
- wl1271_debug(DEBUG_PSM, "PSM entry failed");
-
- if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
- /* remain in active mode */
- wlvif->psm_entry_retry = 0;
- break;
- }
-
- if (wlvif->psm_entry_retry < total_retries) {
- wlvif->psm_entry_retry++;
- ret = wl1271_ps_set_mode(wl, wlvif,
- STATION_POWER_SAVE_MODE,
- wlvif->basic_rate, true);
- } else {
- wl1271_info("No ack to nullfunc from AP.");
- wlvif->psm_entry_retry = 0;
- *beacon_loss = true;
- }
- break;
- case EVENT_ENTER_POWER_SAVE_SUCCESS:
- wlvif->psm_entry_retry = 0;
-
- /*
- * BET has only a minor effect in 5GHz and masks
- * channel switch IEs, so we only enable BET on 2.4GHz
- */
- if (wlvif->band == IEEE80211_BAND_2GHZ)
- /* enable beacon early termination */
- ret = wl1271_acx_bet_enable(wl, wlvif, true);
-
- if (wlvif->ps_compl) {
- complete(wlvif->ps_compl);
- wlvif->ps_compl = NULL;
- }
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static void wl1271_event_rssi_trigger(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct event_mailbox *mbox)
@@ -205,21 +78,13 @@ static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
u8 enable)
{
- struct ieee80211_vif *vif;
struct wl12xx_vif *wlvif;
if (enable) {
- /* disable dynamic PS when requested by the firmware */
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- vif = wl12xx_wlvif_to_vif(wlvif);
- ieee80211_disable_dyn_ps(vif);
- }
set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
} else {
clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
wl12xx_for_each_wlvif_sta(wl, wlvif) {
- vif = wl12xx_wlvif_to_vif(wlvif);
- ieee80211_enable_dyn_ps(vif);
wl1271_recalc_rx_streaming(wl, wlvif);
}
}
@@ -237,7 +102,6 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
{
struct ieee80211_vif *vif;
struct wl12xx_vif *wlvif;
- int ret;
u32 vector;
bool beacon_loss = false;
bool disconnect_sta = false;
@@ -293,21 +157,6 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
beacon_loss = true;
}
- if (vector & PS_REPORT_EVENT_ID) {
- wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- ret = wl1271_event_ps_report(wl, wlvif,
- mbox, &beacon_loss);
- if (ret < 0)
- return ret;
- }
- }
-
- if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- wl1271_event_pspoll_delivery_fail(wl, wlvif);
- }
-
if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
/* TODO: check actual multi-role support */
wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
@@ -344,7 +193,6 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
/* TODO: configure only the relevant vif */
wl12xx_for_each_wlvif_sta(wl, wlvif) {
- struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
bool success;
if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
@@ -352,6 +200,8 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
continue;
success = mbox->channel_switch_status ? false : true;
+ vif = wl12xx_wlvif_to_vif(wlvif);
+
ieee80211_chswitch_done(vif, success);
}
}
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index 1d878ba47bf4..057d193d3525 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -51,10 +51,10 @@ enum {
SCAN_COMPLETE_EVENT_ID = BIT(10),
WFD_DISCOVERY_COMPLETE_EVENT_ID = BIT(11),
AP_DISCOVERY_COMPLETE_EVENT_ID = BIT(12),
- PS_REPORT_EVENT_ID = BIT(13),
+ RESERVED1 = BIT(13),
PSPOLL_DELIVERY_FAILURE_EVENT_ID = BIT(14),
- DISCONNECT_EVENT_COMPLETE_ID = BIT(15),
- /* BIT(16) is reserved */
+ ROLE_STOP_COMPLETE_EVENT_ID = BIT(15),
+ RADAR_DETECTED_EVENT_ID = BIT(16),
CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(17),
BSS_LOSE_EVENT_ID = BIT(18),
REGAINED_BSS_EVENT_ID = BIT(19),
@@ -94,9 +94,9 @@ struct event_mailbox {
u8 soft_gemini_sense_info;
u8 soft_gemini_protective_info;
s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
- u8 channel_switch_status;
+ u8 change_auto_mode_timeout;
u8 scheduled_scan_status;
- u8 ps_status;
+ u8 reserved4;
/* tuned channel (roc) */
u8 roc_channel;
@@ -119,17 +119,21 @@ struct event_mailbox {
u8 rx_ba_allowed;
u8 reserved_6[2];
+ /* Channel switch results */
+
+ u8 channel_switch_role_id;
+ u8 channel_switch_status;
+ u8 reserved_7[2];
+
u8 ps_poll_delivery_failure_role_ids;
u8 stopped_role_ids;
u8 started_role_ids;
- u8 change_auto_mode_timeout;
- u8 reserved_7[12];
+ u8 reserved_8[9];
} __packed;
int wl1271_event_unmask(struct wl1271 *wl);
void wl1271_event_mbox_config(struct wl1271 *wl);
int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
-void wl1271_pspoll_work(struct work_struct *work);
#endif
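
Note on the event.h changes above: the channel-switch result now arrives with its own role id, which should eventually let the handler act on just the affected vif instead of walking every station vif (the event.c hunk above still iterates, per its TODO). A rough sketch of what that could look like once a role-to-vif lookup exists; wl12xx_role_id_to_wlvif() is hypothetical:

	/* hypothetical: use the new per-role field to pick the right vif */
	static void wl12xx_sketch_handle_cs_done(struct wl1271 *wl,
						 struct event_mailbox *mbox)
	{
		struct wl12xx_vif *wlvif;
		struct ieee80211_vif *vif;
		bool success = (mbox->channel_switch_status == 0);

		wlvif = wl12xx_role_id_to_wlvif(wl, mbox->channel_switch_role_id);
		if (!wlvif ||
		    !test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
			return;

		vif = wl12xx_wlvif_to_vif(wlvif);
		ieee80211_chswitch_done(vif, success);
	}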
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index ca7ee59e4505..203fbebf09eb 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -37,54 +37,64 @@
int wl1271_init_templates_config(struct wl1271 *wl)
{
int ret, i;
+ size_t max_size;
/* send empty templates for fw memory reservation */
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL,
- WL1271_CMD_TEMPL_DFLT_SIZE,
+ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
+ CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL,
+ WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
- NULL, WL1271_CMD_TEMPL_DFLT_SIZE, 0,
+ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
+ CMD_TEMPL_CFG_PROBE_REQ_5,
+ NULL, WL1271_CMD_TEMPL_MAX_SIZE, 0,
WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
+ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
+ CMD_TEMPL_NULL_DATA, NULL,
sizeof(struct wl12xx_null_data_template),
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, NULL,
+ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
+ CMD_TEMPL_PS_POLL, NULL,
sizeof(struct wl12xx_ps_poll_template),
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
+ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
+ CMD_TEMPL_QOS_NULL_DATA, NULL,
sizeof
(struct ieee80211_qos_hdr),
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, NULL,
+ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
+ CMD_TEMPL_PROBE_RESPONSE, NULL,
WL1271_CMD_TEMPL_DFLT_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, NULL,
+ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
+ CMD_TEMPL_BEACON, NULL,
WL1271_CMD_TEMPL_DFLT_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP, NULL,
- sizeof
- (struct wl12xx_arp_rsp_template),
+ max_size = sizeof(struct wl12xx_arp_rsp_template) +
+ WL1271_EXTRA_SPACE_MAX;
+ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
+ CMD_TEMPL_ARP_RSP, NULL,
+ max_size,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
@@ -93,19 +103,22 @@ int wl1271_init_templates_config(struct wl1271 *wl)
* Put very large empty placeholders for all templates. These
* reserve memory for later.
*/
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
+ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
+ CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
+ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
+ CMD_TEMPL_AP_BEACON, NULL,
WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
+ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
+ CMD_TEMPL_DEAUTH_AP, NULL,
sizeof
(struct wl12xx_disconn_template),
0, WL1271_RATE_AUTOMATIC);
@@ -113,7 +126,8 @@ int wl1271_init_templates_config(struct wl1271 *wl)
return ret;
for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
+ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
+ CMD_TEMPL_KLV, NULL,
sizeof(struct ieee80211_qos_hdr),
i, WL1271_RATE_AUTOMATIC);
if (ret < 0)
@@ -140,7 +154,8 @@ static int wl1271_ap_init_deauth_template(struct wl1271 *wl,
IEEE80211_STYPE_DEAUTH);
rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
+ ret = wl1271_cmd_template_set(wl, wlvif->role_id,
+ CMD_TEMPL_DEAUTH_AP,
tmpl, sizeof(*tmpl), 0, rate);
out:
@@ -172,7 +187,8 @@ static int wl1271_ap_init_null_template(struct wl1271 *wl,
memcpy(nullfunc->addr3, vif->addr, ETH_ALEN);
rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
+ ret = wl1271_cmd_template_set(wl, wlvif->role_id,
+ CMD_TEMPL_NULL_DATA, nullfunc,
sizeof(*nullfunc), 0, rate);
out:
@@ -204,7 +220,8 @@ static int wl1271_ap_init_qos_null_template(struct wl1271 *wl,
memcpy(qosnull->addr3, vif->addr, ETH_ALEN);
rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
+ ret = wl1271_cmd_template_set(wl, wlvif->role_id,
+ CMD_TEMPL_QOS_NULL_DATA, qosnull,
sizeof(*qosnull), 0, rate);
out:
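
Note on the init.c changes above: every wl1271_cmd_template_set() call gains a role id as its second argument (WL12XX_INVALID_ROLE_ID for the boot-time placeholder templates, wlvif->role_id once a role exists). The header change itself is not shown in this diff; judging from the call sites, the updated prototype is along these lines, with parameter names and exact types inferred rather than quoted:

	/* inferred from the call sites in this patch; names are illustrative */
	int wl1271_cmd_template_set(struct wl1271 *wl, u8 role_id,
				    u16 template_id, void *buf, size_t buf_len,
				    int index, u32 rates);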
diff --git a/drivers/net/wireless/wl12xx/io.c b/drivers/net/wireless/wl12xx/io.c
index 079ad380e8ff..c574a3b31e31 100644
--- a/drivers/net/wireless/wl12xx/io.c
+++ b/drivers/net/wireless/wl12xx/io.c
@@ -45,6 +45,65 @@
#define OCP_STATUS_REQ_FAILED 0x20000
#define OCP_STATUS_RESP_ERROR 0x30000
+struct wl1271_partition_set wl12xx_part_table[PART_TABLE_LEN] = {
+ [PART_DOWN] = {
+ .mem = {
+ .start = 0x00000000,
+ .size = 0x000177c0
+ },
+ .reg = {
+ .start = REGISTERS_BASE,
+ .size = 0x00008800
+ },
+ .mem2 = {
+ .start = 0x00000000,
+ .size = 0x00000000
+ },
+ .mem3 = {
+ .start = 0x00000000,
+ .size = 0x00000000
+ },
+ },
+
+ [PART_WORK] = {
+ .mem = {
+ .start = 0x00040000,
+ .size = 0x00014fc0
+ },
+ .reg = {
+ .start = REGISTERS_BASE,
+ .size = 0x0000a000
+ },
+ .mem2 = {
+ .start = 0x003004f8,
+ .size = 0x00000004
+ },
+ .mem3 = {
+ .start = 0x00040404,
+ .size = 0x00000000
+ },
+ },
+
+ [PART_DRPW] = {
+ .mem = {
+ .start = 0x00040000,
+ .size = 0x00014fc0
+ },
+ .reg = {
+ .start = DRPW_BASE,
+ .size = 0x00006000
+ },
+ .mem2 = {
+ .start = 0x00000000,
+ .size = 0x00000000
+ },
+ .mem3 = {
+ .start = 0x00000000,
+ .size = 0x00000000
+ }
+ }
+};
+
bool wl1271_set_block_size(struct wl1271 *wl)
{
if (wl->if_ops->set_block_size) {
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h
index d398cbcea986..4fb3dab8c3b2 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -43,6 +43,8 @@
#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
+extern struct wl1271_partition_set wl12xx_part_table[PART_TABLE_LEN];
+
struct wl1271;
void wl1271_disable_interrupts(struct wl1271 *wl);
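
Note on the io.c/io.h changes above: the partition table is now built once in io.c and exported, so callers select a predefined window instead of assembling a wl1271_partition_set on the stack (see the wl12xx_set_power_on() hunk in main.c below). In sketch form, assuming only what this patch exports:

	/* illustrative: map the download window for fw/nvs upload... */
	wl1271_set_partition(wl, &wl12xx_part_table[PART_DOWN]);
	/* ...and, presumably, the working window once the firmware runs */
	wl1271_set_partition(wl, &wl12xx_part_table[PART_WORK]);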
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index d5f55a149de5..39002363611e 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -1,3 +1,4 @@
+
/*
* This file is part of wl1271
*
@@ -115,6 +116,9 @@ static struct conf_drv_settings default_conf = {
[CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
[CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
[CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
+ /* CTS Diluting params */
+ [CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0,
+ [CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0,
},
.state = CONF_SG_PROTECTIVE,
},
@@ -213,10 +217,13 @@ static struct conf_drv_settings default_conf = {
.basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
.tmpl_short_retry_limit = 10,
.tmpl_long_retry_limit = 10,
+ .tx_watchdog_timeout = 5000,
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
.listen_interval = 1,
+ .suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM,
+ .suspend_listen_interval = 3,
.bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
.bcn_filt_ie_count = 2,
.bcn_filt_ie = {
@@ -235,12 +242,13 @@ static struct conf_drv_settings default_conf = {
.broadcast_timeout = 20000,
.rx_broadcast_in_ps = 1,
.ps_poll_threshold = 10,
- .ps_poll_recovery_period = 700,
.bet_enable = CONF_BET_MODE_ENABLE,
.bet_max_consecutive = 50,
.psm_entry_retries = 8,
.psm_exit_retries = 16,
.psm_entry_nullfunc_retries = 3,
+ .dynamic_ps_timeout = 200,
+ .forced_ps = false,
.keep_alive_interval = 55000,
.max_listen_interval = 20,
},
@@ -265,6 +273,7 @@ static struct conf_drv_settings default_conf = {
.min_dwell_time_passive = 100000,
.max_dwell_time_passive = 100000,
.num_probe_reqs = 2,
+ .split_scan_timeout = 50000,
},
.sched_scan = {
/* sched_scan requires dwell times in TU instead of TU/1000 */
@@ -384,15 +393,15 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
static void wl1271_op_stop(struct ieee80211_hw *hw);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-static DEFINE_MUTEX(wl_list_mutex);
-static LIST_HEAD(wl_list);
-
-static int wl1271_check_operstate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- unsigned char operstate)
+static int wl12xx_set_authorized(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- if (operstate != IF_OPER_UP)
+ if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
+ return -EINVAL;
+
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
return 0;
if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
@@ -407,76 +416,6 @@ static int wl1271_check_operstate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl1271_info("Association completed.");
return 0;
}
-static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
- void *arg)
-{
- struct net_device *dev = arg;
- struct wireless_dev *wdev;
- struct wiphy *wiphy;
- struct ieee80211_hw *hw;
- struct wl1271 *wl;
- struct wl1271 *wl_temp;
- struct wl12xx_vif *wlvif;
- int ret = 0;
-
- /* Check that this notification is for us. */
- if (what != NETDEV_CHANGE)
- return NOTIFY_DONE;
-
- wdev = dev->ieee80211_ptr;
- if (wdev == NULL)
- return NOTIFY_DONE;
-
- wiphy = wdev->wiphy;
- if (wiphy == NULL)
- return NOTIFY_DONE;
-
- hw = wiphy_priv(wiphy);
- if (hw == NULL)
- return NOTIFY_DONE;
-
- wl_temp = hw->priv;
- mutex_lock(&wl_list_mutex);
- list_for_each_entry(wl, &wl_list, list) {
- if (wl == wl_temp)
- break;
- }
- mutex_unlock(&wl_list_mutex);
- if (wl != wl_temp)
- return NOTIFY_DONE;
-
- mutex_lock(&wl->mutex);
-
- if (wl->state == WL1271_STATE_OFF)
- goto out;
-
- if (dev->operstate != IF_OPER_UP)
- goto out;
- /*
- * The correct behavior should be just getting the appropriate wlvif
- * from the given dev, but currently we don't have a mac80211
- * interface for it.
- */
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
-
- if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
- continue;
-
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
-
- wl1271_check_operstate(wl, wlvif,
- ieee80211_get_operstate(vif));
-
- wl1271_ps_elp_sleep(wl);
- }
-out:
- mutex_unlock(&wl->mutex);
-
- return NOTIFY_OK;
-}
static int wl1271_reg_notify(struct wiphy *wiphy,
struct regulatory_request *request)
@@ -615,6 +554,80 @@ static void wl1271_rx_streaming_timer(unsigned long data)
ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
}
+/* wl->mutex must be taken */
+void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
+{
+ /* if the watchdog is not armed, don't do anything */
+ if (wl->tx_allocated_blocks == 0)
+ return;
+
+ cancel_delayed_work(&wl->tx_watchdog_work);
+ ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
+ msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
+}
+
+static void wl12xx_tx_watchdog_work(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct wl1271 *wl;
+
+ dwork = container_of(work, struct delayed_work, work);
+ wl = container_of(dwork, struct wl1271, tx_watchdog_work);
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ /* Tx went out in the meantime - everything is ok */
+ if (unlikely(wl->tx_allocated_blocks == 0))
+ goto out;
+
+ /*
+ * if a ROC is in progress, we might not have any Tx for a long
+ * time (e.g. pending Tx on the non-ROC channels)
+ */
+ if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
+ wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
+ wl->conf.tx.tx_watchdog_timeout);
+ wl12xx_rearm_tx_watchdog_locked(wl);
+ goto out;
+ }
+
+ /*
+ * if a scan is in progress, we might not have any Tx for a long
+ * time
+ */
+ if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
+ wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
+ wl->conf.tx.tx_watchdog_timeout);
+ wl12xx_rearm_tx_watchdog_locked(wl);
+ goto out;
+ }
+
+ /*
+ * AP might cache a frame for a long time for a sleeping station,
+ * so rearm the timer if there's an AP interface with stations. If
+ * Tx is genuinely stuck we will most likely discover it when all
+ * stations are removed due to inactivity.
+ */
+ if (wl->active_sta_count) {
+ wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
+ " %d stations",
+ wl->conf.tx.tx_watchdog_timeout,
+ wl->active_sta_count);
+ wl12xx_rearm_tx_watchdog_locked(wl);
+ goto out;
+ }
+
+ wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
+ wl->conf.tx.tx_watchdog_timeout);
+ wl12xx_queue_recovery_work(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+}
+
static void wl1271_conf_init(struct wl1271 *wl)
{
@@ -672,8 +685,6 @@ static int wl1271_plt_init(struct wl1271 *wl)
if (ret < 0)
return ret;
}
- if (ret < 0)
- return ret;
/* Chip-specific initializations */
ret = wl1271_chip_specific_init(wl);
@@ -809,6 +820,18 @@ static void wl12xx_fw_status(struct wl1271 *wl,
wl->tx_allocated_blocks -= freed_blocks;
+ /*
+ * If the FW freed some blocks:
+ * If we still have allocated blocks - re-arm the timer, Tx is
+ * not stuck. Otherwise, cancel the timer (no Tx currently).
+ */
+ if (freed_blocks) {
+ if (wl->tx_allocated_blocks)
+ wl12xx_rearm_tx_watchdog_locked(wl);
+ else
+ cancel_delayed_work(&wl->tx_watchdog_work);
+ }
+
avail = le32_to_cpu(status->tx_total) - wl->tx_allocated_blocks;
/*
@@ -985,16 +1008,70 @@ out:
return IRQ_HANDLED;
}
-static int wl1271_fetch_firmware(struct wl1271 *wl)
+struct vif_counter_data {
+ u8 counter;
+
+ struct ieee80211_vif *cur_vif;
+ bool cur_vif_running;
+};
+
+static void wl12xx_vif_count_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct vif_counter_data *counter = data;
+
+ counter->counter++;
+ if (counter->cur_vif == vif)
+ counter->cur_vif_running = true;
+}
+
+/* caller must not hold wl->mutex, as it might deadlock */
+static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *cur_vif,
+ struct vif_counter_data *data)
+{
+ memset(data, 0, sizeof(*data));
+ data->cur_vif = cur_vif;
+
+ ieee80211_iterate_active_interfaces(hw,
+ wl12xx_vif_count_iter, data);
+}
+
+static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
{
const struct firmware *fw;
const char *fw_name;
+ enum wl12xx_fw_type fw_type;
int ret;
- if (wl->chip.id == CHIP_ID_1283_PG20)
- fw_name = WL128X_FW_NAME;
- else
- fw_name = WL127X_FW_NAME;
+ if (plt) {
+ fw_type = WL12XX_FW_TYPE_PLT;
+ if (wl->chip.id == CHIP_ID_1283_PG20)
+ fw_name = WL128X_PLT_FW_NAME;
+ else
+ fw_name = WL127X_PLT_FW_NAME;
+ } else {
+ /*
+ * we can't call wl12xx_get_vif_count() here because
+ * wl->mutex is taken, so use the cached last_vif_count value
+ */
+ if (wl->last_vif_count > 1) {
+ fw_type = WL12XX_FW_TYPE_MULTI;
+ if (wl->chip.id == CHIP_ID_1283_PG20)
+ fw_name = WL128X_FW_NAME_MULTI;
+ else
+ fw_name = WL127X_FW_NAME_MULTI;
+ } else {
+ fw_type = WL12XX_FW_TYPE_NORMAL;
+ if (wl->chip.id == CHIP_ID_1283_PG20)
+ fw_name = WL128X_FW_NAME_SINGLE;
+ else
+ fw_name = WL127X_FW_NAME_SINGLE;
+ }
+ }
+
+ if (wl->fw_type == fw_type)
+ return 0;
wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
@@ -1013,6 +1090,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
}
vfree(wl->fw);
+ wl->fw_type = WL12XX_FW_TYPE_NONE;
wl->fw_len = fw->size;
wl->fw = vmalloc(wl->fw_len);
@@ -1024,7 +1102,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
memcpy(wl->fw, fw->data, wl->fw_len);
ret = 0;
-
+ wl->fw_type = fw_type;
out:
release_firmware(fw);
@@ -1152,7 +1230,7 @@ static void wl1271_recovery_work(struct work_struct *work)
mutex_lock(&wl->mutex);
- if (wl->state != WL1271_STATE_ON)
+ if (wl->state != WL1271_STATE_ON || wl->plt)
goto out_unlock;
/* Avoid a recursive recovery */
@@ -1163,7 +1241,8 @@ static void wl1271_recovery_work(struct work_struct *work)
wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
wl->chip.fw_ver_str, wl1271_read32(wl, SCR_PAD4));
- BUG_ON(bug_on_recovery);
+ BUG_ON(bug_on_recovery &&
+ !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
/*
* Advance security sequence number to overcome potential progress
@@ -1232,10 +1311,9 @@ static int wl1271_setup(struct wl1271 *wl)
return 0;
}
-static int wl1271_chip_wakeup(struct wl1271 *wl)
+static int wl12xx_set_power_on(struct wl1271 *wl)
{
- struct wl1271_partition_set partition;
- int ret = 0;
+ int ret;
msleep(WL1271_PRE_POWER_ON_SLEEP);
ret = wl1271_power_on(wl);
@@ -1245,20 +1323,22 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
wl1271_io_reset(wl);
wl1271_io_init(wl);
- /* We don't need a real memory partition here, because we only want
- * to use the registers at this point. */
- memset(&partition, 0, sizeof(partition));
- partition.reg.start = REGISTERS_BASE;
- partition.reg.size = REGISTERS_DOWN_SIZE;
- wl1271_set_partition(wl, &partition);
+ wl1271_set_partition(wl, &wl12xx_part_table[PART_DOWN]);
/* ELP module wake up */
wl1271_fw_wakeup(wl);
- /* whal_FwCtrl_BootSm() */
+out:
+ return ret;
+}
+
+static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
+{
+ int ret = 0;
- /* 0. read chip id from CHIP_ID */
- wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
+ ret = wl12xx_set_power_on(wl);
+ if (ret < 0)
+ goto out;
/*
* For wl127x based devices we could use the default block
@@ -1307,11 +1387,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
goto out;
}
- if (wl->fw == NULL) {
- ret = wl1271_fetch_firmware(wl);
- if (ret < 0)
- goto out;
- }
+ ret = wl12xx_fetch_firmware(wl, plt);
+ if (ret < 0)
+ goto out;
/* No NVS from netlink, try to get it from the filesystem */
if (wl->nvs == NULL) {
@@ -1343,7 +1421,7 @@ int wl1271_plt_start(struct wl1271 *wl)
while (retries) {
retries--;
- ret = wl1271_chip_wakeup(wl);
+ ret = wl12xx_chip_wakeup(wl, true);
if (ret < 0)
goto power_off;
@@ -1355,7 +1433,8 @@ int wl1271_plt_start(struct wl1271 *wl)
if (ret < 0)
goto irq_disable;
- wl->state = WL1271_STATE_PLT;
+ wl->plt = true;
+ wl->state = WL1271_STATE_ON;
wl1271_notice("firmware booted in PLT mode (%s)",
wl->chip.fw_ver_str);
@@ -1391,41 +1470,52 @@ out:
return ret;
}
-static int __wl1271_plt_stop(struct wl1271 *wl)
+int wl1271_plt_stop(struct wl1271 *wl)
{
int ret = 0;
wl1271_notice("power down");
- if (wl->state != WL1271_STATE_PLT) {
+ /*
+ * Interrupts must be disabled before setting the state to OFF.
+ * Otherwise, the interrupt handler might be called and exit without
+ * reading the interrupt status.
+ */
+ wl1271_disable_interrupts(wl);
+ mutex_lock(&wl->mutex);
+ if (!wl->plt) {
+ mutex_unlock(&wl->mutex);
+
+ /*
+ * This will not necessarily enable interrupts as interrupts
+ * may have been disabled when op_stop was called. It will,
+ * however, balance the above call to disable_interrupts().
+ */
+ wl1271_enable_interrupts(wl);
+
wl1271_error("cannot power down because not in PLT "
"state: %d", wl->state);
ret = -EBUSY;
goto out;
}
- wl1271_power_off(wl);
-
- wl->state = WL1271_STATE_OFF;
- wl->rx_counter = 0;
-
mutex_unlock(&wl->mutex);
- wl1271_disable_interrupts(wl);
+
wl1271_flush_deferred_work(wl);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->recovery_work);
- mutex_lock(&wl->mutex);
-out:
- return ret;
-}
-
-int wl1271_plt_stop(struct wl1271 *wl)
-{
- int ret;
+ cancel_delayed_work_sync(&wl->elp_work);
+ cancel_delayed_work_sync(&wl->tx_watchdog_work);
mutex_lock(&wl->mutex);
- ret = __wl1271_plt_stop(wl);
+ wl1271_power_off(wl);
+ wl->flags = 0;
+ wl->state = WL1271_STATE_OFF;
+ wl->plt = false;
+ wl->rx_counter = 0;
mutex_unlock(&wl->mutex);
+
+out:
return ret;
}
@@ -1457,7 +1547,8 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
goto out;
}
- wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+ wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
+ hlid, q, skb->len);
skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
wl->tx_queue_count[q]++;
@@ -1555,10 +1646,6 @@ static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
}
-static struct notifier_block wl1271_dev_notifier = {
- .notifier_call = wl1271_dev_notify,
-};
-
#ifdef CONFIG_PM
static int wl1271_configure_suspend_sta(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
@@ -1574,38 +1661,16 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
if (ret < 0)
goto out_unlock;
- /* enter psm if needed*/
- if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
- DECLARE_COMPLETION_ONSTACK(compl);
-
- wlvif->ps_compl = &compl;
- ret = wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
- wlvif->basic_rate, true);
- if (ret < 0)
- goto out_sleep;
-
- /* we must unlock here so we will be able to get events */
- wl1271_ps_elp_sleep(wl);
- mutex_unlock(&wl->mutex);
+ ret = wl1271_acx_wake_up_conditions(wl, wlvif,
+ wl->conf.conn.suspend_wake_up_event,
+ wl->conf.conn.suspend_listen_interval);
- ret = wait_for_completion_timeout(
- &compl, msecs_to_jiffies(WL1271_PS_COMPLETE_TIMEOUT));
+ if (ret < 0)
+ wl1271_error("suspend: set wake up conditions failed: %d", ret);
- mutex_lock(&wl->mutex);
- if (ret <= 0) {
- wl1271_warning("couldn't enter ps mode!");
- ret = -EBUSY;
- goto out_cleanup;
- }
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out_cleanup;
- }
-out_sleep:
wl1271_ps_elp_sleep(wl);
-out_cleanup:
- wlvif->ps_compl = NULL;
+
out_unlock:
mutex_unlock(&wl->mutex);
return ret;
@@ -1648,11 +1713,11 @@ static int wl1271_configure_suspend(struct wl1271 *wl,
static void wl1271_configure_resume(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
- int ret;
- bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
+ int ret = 0;
bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
+ bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
- if (!is_sta && !is_ap)
+ if ((!is_ap) && (!is_sta))
return;
mutex_lock(&wl->mutex);
@@ -1661,12 +1726,16 @@ static void wl1271_configure_resume(struct wl1271 *wl,
goto out;
if (is_sta) {
- /* exit psm if it wasn't configured */
- if (!test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags))
- wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
- wlvif->basic_rate, true);
+ ret = wl1271_acx_wake_up_conditions(wl, wlvif,
+ wl->conf.conn.wake_up_event,
+ wl->conf.conn.listen_interval);
+
+ if (ret < 0)
+ wl1271_error("resume: wake up conditions failed: %d",
+ ret);
+
} else if (is_ap) {
- wl1271_acx_beacon_filter_opt(wl, wlvif, false);
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
}
wl1271_ps_elp_sleep(wl);
@@ -1684,6 +1753,8 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
WARN_ON(!wow || !wow->any);
+ wl1271_tx_flush(wl);
+
wl->wow_enabled = true;
wl12xx_for_each_wlvif(wl, wlvif) {
ret = wl1271_configure_suspend(wl, wlvif);
@@ -1709,9 +1780,6 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
wl1271_enable_interrupts(wl);
flush_work(&wl->tx_work);
- wl12xx_for_each_wlvif(wl, wlvif) {
- flush_delayed_work(&wlvif->pspoll_work);
- }
flush_delayed_work(&wl->elp_work);
return 0;
@@ -1778,11 +1846,25 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
+ /*
+ * Interrupts must be disabled before setting the state to OFF.
+ * Otherwise, the interrupt handler might be called and exit without
+ * reading the interrupt status.
+ */
+ wl1271_disable_interrupts(wl);
mutex_lock(&wl->mutex);
if (wl->state == WL1271_STATE_OFF) {
mutex_unlock(&wl->mutex);
+
+ /*
+ * This will not necessarily enable interrupts as interrupts
+ * may have been disabled when op_stop was called. It will,
+ * however, balance the above call to disable_interrupts().
+ */
+ wl1271_enable_interrupts(wl);
return;
}
+
/*
* this must be before the cancel_work calls below, so that the work
* functions don't perform further work.
@@ -1790,16 +1872,12 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl->state = WL1271_STATE_OFF;
mutex_unlock(&wl->mutex);
- mutex_lock(&wl_list_mutex);
- list_del(&wl->list);
- mutex_unlock(&wl_list_mutex);
-
- wl1271_disable_interrupts(wl);
wl1271_flush_deferred_work(wl);
cancel_delayed_work_sync(&wl->scan_complete_work);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->tx_work);
cancel_delayed_work_sync(&wl->elp_work);
+ cancel_delayed_work_sync(&wl->tx_watchdog_work);
/* let's notify MAC80211 about the remaining pending TX frames */
wl12xx_tx_reset(wl, true);
@@ -1969,7 +2047,6 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wl1271_rx_streaming_enable_work);
INIT_WORK(&wlvif->rx_streaming_disable_work,
wl1271_rx_streaming_disable_work);
- INIT_DELAYED_WORK(&wlvif->pspoll_work, wl1271_pspoll_work);
INIT_LIST_HEAD(&wlvif->list);
setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
@@ -1986,7 +2063,7 @@ static bool wl12xx_init_fw(struct wl1271 *wl)
while (retries) {
retries--;
- ret = wl1271_chip_wakeup(wl);
+ ret = wl12xx_chip_wakeup(wl, false);
if (ret < 0)
goto power_off;
@@ -2051,30 +2128,77 @@ static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
}
+/*
+ * Check whether a fw switch (i.e. moving from one loaded
+ * fw to another) is needed. This function is also responsible
+ * for updating wl->last_vif_count, so it must be called before
+ * loading a non-plt fw (so the correct fw (single-role/multi-role)
+ * will be used).
+ */
+static bool wl12xx_need_fw_change(struct wl1271 *wl,
+ struct vif_counter_data vif_counter_data,
+ bool add)
+{
+ enum wl12xx_fw_type current_fw = wl->fw_type;
+ u8 vif_count = vif_counter_data.counter;
+
+ if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
+ return false;
+
+ /* increase the vif count if this is a new vif */
+ if (add && !vif_counter_data.cur_vif_running)
+ vif_count++;
+
+ wl->last_vif_count = vif_count;
+
+ /* no need for fw change if the device is OFF */
+ if (wl->state == WL1271_STATE_OFF)
+ return false;
+
+ if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
+ return true;
+ if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
+ return true;
+
+ return false;
+}
+
+/*
+ * Enter "forced psm". Make sure the sta is in psm against the ap,
+ * to make the fw switch a bit more disconnection-persistent.
+ */
+static void wl12xx_force_active_psm(struct wl1271 *wl)
+{
+ struct wl12xx_vif *wlvif;
+
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
+ }
+}
+
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct vif_counter_data vif_count;
int ret = 0;
u8 role_type;
bool booted = false;
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+
wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
ieee80211_vif_type_p2p(vif), vif->addr);
+ wl12xx_get_vif_count(hw, vif, &vif_count);
+
mutex_lock(&wl->mutex);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_unlock;
- if (wl->vif) {
- wl1271_debug(DEBUG_MAC80211,
- "multiple vifs are not supported yet");
- ret = -EBUSY;
- goto out;
- }
-
/*
* in some very corner case HW recovery scenarios its possible to
* get here before __wl1271_op_remove_interface is complete, so
@@ -2086,6 +2210,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
+
ret = wl12xx_init_vif_data(wl, vif);
if (ret < 0)
goto out;
@@ -2097,6 +2222,14 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
+ if (wl12xx_need_fw_change(wl, vif_count, true)) {
+ wl12xx_force_active_psm(wl);
+ set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
+ mutex_unlock(&wl->mutex);
+ wl1271_recovery_work(&wl->recovery_work);
+ return 0;
+ }
+
/*
* TODO: after the nvs issue will be solved, move this block
* to start(), and make sure here the driver is ON.
@@ -2106,7 +2239,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
* we still need this in order to configure the fw
* while uploading the nvs
*/
- memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
+ memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
booted = wl12xx_init_fw(wl);
if (!booted) {
@@ -2139,7 +2272,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- wl->vif = vif;
list_add(&wlvif->list, &wl->wlvif_list);
set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
@@ -2152,11 +2284,6 @@ out:
out_unlock:
mutex_unlock(&wl->mutex);
- mutex_lock(&wl_list_mutex);
- if (!ret)
- list_add(&wl->list, &wl_list);
- mutex_unlock(&wl_list_mutex);
-
return ret;
}
@@ -2172,20 +2299,20 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
return;
- wl->vif = NULL;
-
/* because of hardware recovery, we may get here twice */
if (wl->state != WL1271_STATE_ON)
return;
wl1271_info("down");
- /* enable dyn ps just in case (if left on due to fw crash etc) */
- if (wlvif->bss_type == BSS_TYPE_STA_BSS)
- ieee80211_enable_dyn_ps(vif);
-
if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
wl->scan_vif == vif) {
+ /*
+ * Rearm the tx watchdog just before idling scan. This
+ * prevents just-finished scans from triggering the watchdog
+ */
+ wl12xx_rearm_tx_watchdog_locked(wl);
+
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan_vif = NULL;
@@ -2250,10 +2377,10 @@ deinit:
wl->sta_count--;
mutex_unlock(&wl->mutex);
+
del_timer_sync(&wlvif->rx_streaming_timer);
cancel_work_sync(&wlvif->rx_streaming_enable_work);
cancel_work_sync(&wlvif->rx_streaming_disable_work);
- cancel_delayed_work_sync(&wlvif->pspoll_work);
mutex_lock(&wl->mutex);
}
@@ -2264,7 +2391,10 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl12xx_vif *iter;
+ struct vif_counter_data vif_count;
+ bool cancel_recovery = true;
+ wl12xx_get_vif_count(hw, vif, &vif_count);
mutex_lock(&wl->mutex);
if (wl->state == WL1271_STATE_OFF ||
@@ -2283,20 +2413,34 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
break;
}
WARN_ON(iter != wlvif);
+ if (wl12xx_need_fw_change(wl, vif_count, false)) {
+ wl12xx_force_active_psm(wl);
+ set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
+ wl12xx_queue_recovery_work(wl);
+ cancel_recovery = false;
+ }
out:
mutex_unlock(&wl->mutex);
- cancel_work_sync(&wl->recovery_work);
+ if (cancel_recovery)
+ cancel_work_sync(&wl->recovery_work);
}
static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum nl80211_iftype new_type, bool p2p)
{
+ struct wl1271 *wl = hw->priv;
+ int ret;
+
+ set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
wl1271_op_remove_interface(hw, vif);
- vif->type = ieee80211_iftype_p2p(new_type, p2p);
+ vif->type = new_type;
vif->p2p = p2p;
- return wl1271_op_add_interface(hw, vif);
+ ret = wl1271_op_add_interface(hw, vif);
+
+ clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
+ return ret;
}
static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
@@ -2317,6 +2461,9 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
wl1271_info("JOIN while associated.");
+ /* clear encryption type */
+ wlvif->encryption_type = KEY_NONE;
+
if (set_assoc)
set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
@@ -2467,71 +2614,61 @@ static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl1271_warning("rate policy for channel "
"failed %d", ret);
- if (test_bit(WLVIF_FLAG_STA_ASSOCIATED,
- &wlvif->flags)) {
- if (wl12xx_dev_role_started(wlvif)) {
- /* roaming */
- ret = wl12xx_croc(wl,
- wlvif->dev_role_id);
- if (ret < 0)
- return ret;
- }
- ret = wl1271_join(wl, wlvif, false);
+ /*
+ * change the ROC channel. do it only if we are
+ * not idle. otherwise, CROC will be called
+ * anyway.
+ */
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED,
+ &wlvif->flags) &&
+ wl12xx_dev_role_started(wlvif) &&
+ !(conf->flags & IEEE80211_CONF_IDLE)) {
+ ret = wl12xx_stop_dev(wl, wlvif);
if (ret < 0)
- wl1271_warning("cmd join on channel "
- "failed %d", ret);
- } else {
- /*
- * change the ROC channel. do it only if we are
- * not idle. otherwise, CROC will be called
- * anyway.
- */
- if (wl12xx_dev_role_started(wlvif) &&
- !(conf->flags & IEEE80211_CONF_IDLE)) {
- ret = wl12xx_stop_dev(wl, wlvif);
- if (ret < 0)
- return ret;
+ return ret;
- ret = wl12xx_start_dev(wl, wlvif);
- if (ret < 0)
- return ret;
- }
+ ret = wl12xx_start_dev(wl, wlvif);
+ if (ret < 0)
+ return ret;
}
}
}
- /*
- * if mac80211 changes the PSM mode, make sure the mode is not
- * incorrectly changed after the pspoll failure active window.
- */
- if (changed & IEEE80211_CONF_CHANGE_PS)
- clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
+ if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) {
- if (conf->flags & IEEE80211_CONF_PS &&
- !test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
- set_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
+ if ((conf->flags & IEEE80211_CONF_PS) &&
+ test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
+ !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
- /*
- * We enter PSM only if we're already associated.
- * If we're not, we'll enter it when joining an SSID,
- * through the bss_info_changed() hook.
- */
- if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
- wl1271_debug(DEBUG_PSM, "psm enabled");
- ret = wl1271_ps_set_mode(wl, wlvif,
- STATION_POWER_SAVE_MODE,
- wlvif->basic_rate, true);
- }
- } else if (!(conf->flags & IEEE80211_CONF_PS) &&
- test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
- wl1271_debug(DEBUG_PSM, "psm disabled");
+ int ps_mode;
+ char *ps_mode_str;
+
+ if (wl->conf.conn.forced_ps) {
+ ps_mode = STATION_POWER_SAVE_MODE;
+ ps_mode_str = "forced";
+ } else {
+ ps_mode = STATION_AUTO_PS_MODE;
+ ps_mode_str = "auto";
+ }
+
+ wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
+
+ ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
- clear_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
+ if (ret < 0)
+ wl1271_warning("enter %s ps failed %d",
+ ps_mode_str, ret);
+
+ } else if (!(conf->flags & IEEE80211_CONF_PS) &&
+ test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
+
+ wl1271_debug(DEBUG_PSM, "auto ps disabled");
- if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags))
ret = wl1271_ps_set_mode(wl, wlvif,
- STATION_ACTIVE_MODE,
- wlvif->basic_rate, true);
+ STATION_ACTIVE_MODE);
+ if (ret < 0)
+ wl1271_warning("exit auto ps failed %d", ret);
+ }
}
if (conf->power_level != wlvif->power_level) {
@@ -2971,6 +3108,21 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
wl1271_error("Could not add or replace key");
goto out_sleep;
}
+
+ /*
+ * reconfiguring arp response if the unicast (or common)
+ * encryption key type was changed
+ */
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
+ (sta || key_type == KEY_WEP) &&
+ wlvif->encryption_type != key_type) {
+ wlvif->encryption_type = key_type;
+ ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
+ if (ret < 0) {
+ wl1271_warning("build arp rsp failed: %d", ret);
+ goto out_sleep;
+ }
+ }
break;
case DISABLE_KEY:
@@ -3004,8 +3156,6 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
struct cfg80211_scan_request *req)
{
struct wl1271 *wl = hw->priv;
- struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
-
int ret;
u8 *ssid = NULL;
size_t len = 0;
@@ -3033,17 +3183,13 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
- test_bit(wlvif->role_id, wl->roc_map)) {
+ /* fail if there is any role in ROC */
+ if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
/* don't allow scanning right now */
ret = -EBUSY;
goto out_sleep;
}
- /* cancel ROC before scanning */
- if (wl12xx_dev_role_started(wlvif))
- wl12xx_stop_dev(wl, wlvif);
-
ret = wl1271_scan(hw->priv, vif, ssid, len, req);
out_sleep:
wl1271_ps_elp_sleep(wl);
@@ -3078,6 +3224,13 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
}
+
+ /*
+ * Rearm the tx watchdog just before idling scan. This
+ * prevents just-finished scans from triggering the watchdog
+ */
+ wl12xx_rearm_tx_watchdog_locked(wl);
+
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan_vif = NULL;
@@ -3105,6 +3258,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
+ if (wl->state == WL1271_STATE_OFF) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -3136,6 +3294,9 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
+ if (wl->state == WL1271_STATE_OFF)
+ goto out;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -3263,6 +3424,7 @@ static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct sk_buff *skb;
int ret;
@@ -3270,7 +3432,7 @@ static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
if (!skb)
return -EOPNOTSUPP;
- ret = wl1271_cmd_template_set(wl,
+ ret = wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_AP_PROBE_RESPONSE,
skb->data,
skb->len, 0,
@@ -3294,7 +3456,7 @@ static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
/* no need to change probe response if the SSID is set correctly */
if (wlvif->ssid_len > 0)
- return wl1271_cmd_template_set(wl,
+ return wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_AP_PROBE_RESPONSE,
probe_rsp_data,
probe_rsp_len, 0,
@@ -3331,7 +3493,7 @@ static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
ptr, probe_rsp_len - (ptr - probe_rsp_data));
templ_len += probe_rsp_len - (ptr - probe_rsp_data);
- return wl1271_cmd_template_set(wl,
+ return wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_AP_PROBE_RESPONSE,
probe_rsp_templ,
templ_len, 0,
@@ -3428,7 +3590,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
CMD_TEMPL_BEACON;
- ret = wl1271_cmd_template_set(wl, tmpl_id,
+ ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
beacon->data,
beacon->len, 0,
min_rate);
@@ -3467,7 +3629,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
beacon->len,
min_rate);
else
- ret = wl1271_cmd_template_set(wl,
+ ret = wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_PROBE_RESPONSE,
beacon->data,
beacon->len, 0,
@@ -3592,10 +3754,8 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
ibss_joined = true;
} else {
if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
- &wlvif->flags)) {
+ &wlvif->flags))
wl1271_unjoin(wl, wlvif);
- wl12xx_start_dev(wl, wlvif);
- }
}
}
@@ -3613,7 +3773,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
do_join = true;
}
- if (changed & BSS_CHANGED_IDLE) {
+ if (changed & BSS_CHANGED_IDLE && !is_ibss) {
ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
if (ret < 0)
wl1271_warning("idle mode change failed %d", ret);
@@ -3631,7 +3791,8 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
}
- if (changed & BSS_CHANGED_BSSID)
+ if (changed & BSS_CHANGED_BSSID &&
+ (is_ibss || bss_conf->assoc))
if (!is_zero_ether_addr(bss_conf->bssid)) {
ret = wl12xx_cmd_build_null_data(wl, wlvif);
if (ret < 0)
@@ -3668,10 +3829,9 @@ sta_not_found:
u32 rates;
int ieoffset;
wlvif->aid = bss_conf->aid;
+ wlvif->beacon_int = bss_conf->beacon_int;
set_assoc = true;
- wlvif->ps_poll_failures = 0;
-
/*
* use basic rates from AP, and determine lowest rate
* to use with control frames.
@@ -3731,9 +3891,6 @@ sta_not_found:
dev_kfree_skb(wlvif->probereq);
wlvif->probereq = NULL;
- /* re-enable dynamic ps - just in case */
- ieee80211_enable_dyn_ps(vif);
-
/* revert back to minimum rates for the current band */
wl1271_set_band_rate(wl, wlvif);
wlvif->basic_rate =
@@ -3753,7 +3910,6 @@ sta_not_found:
/* restore the bssid filter and go to dummy bssid */
if (was_assoc) {
- u32 conf_flags = wl->hw->conf.flags;
/*
* we might have to disable roc, if there was
* no IF_OPER_UP notification.
@@ -3776,7 +3932,7 @@ sta_not_found:
}
wl1271_unjoin(wl, wlvif);
- if (!(conf_flags & IEEE80211_CONF_IDLE))
+ if (!bss_conf->idle)
wl12xx_start_dev(wl, wlvif);
}
}
@@ -3807,34 +3963,6 @@ sta_not_found:
if (ret < 0)
goto out;
- if (changed & BSS_CHANGED_ARP_FILTER) {
- __be32 addr = bss_conf->arp_addr_list[0];
- WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
-
- if (bss_conf->arp_addr_cnt == 1 &&
- bss_conf->arp_filter_enabled) {
- /*
- * The template should have been configured only upon
- * association. however, it seems that the correct ip
- * isn't being set (when sending), so we have to
- * reconfigure the template upon every ip change.
- */
- ret = wl1271_cmd_build_arp_rsp(wl, wlvif, addr);
- if (ret < 0) {
- wl1271_warning("build arp rsp failed: %d", ret);
- goto out;
- }
-
- ret = wl1271_acx_arp_ip_filter(wl, wlvif,
- ACX_ARP_FILTER_ARP_FILTERING,
- addr);
- } else
- ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
-
- if (ret < 0)
- goto out;
- }
-
if (do_join) {
ret = wl1271_join(wl, wlvif, set_assoc);
if (ret < 0) {
@@ -3848,8 +3976,8 @@ sta_not_found:
if (ret < 0)
goto out;
- wl1271_check_operstate(wl, wlvif,
- ieee80211_get_operstate(vif));
+ if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
+ wl12xx_set_authorized(wl, wlvif);
}
/*
* stop device role if started (we might already be in
@@ -3860,19 +3988,6 @@ sta_not_found:
if (ret < 0)
goto out;
}
-
- /* If we want to go in PSM but we're not there yet */
- if (test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags) &&
- !test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
- enum wl1271_cmd_ps_mode mode;
-
- mode = STATION_POWER_SAVE_MODE;
- ret = wl1271_ps_set_mode(wl, wlvif, mode,
- wlvif->basic_rate,
- true);
- if (ret < 0)
- goto out;
- }
}
/* Handle new association with HT. Do this after join. */
@@ -3914,6 +4029,41 @@ sta_not_found:
}
}
+ /* Handle arp filtering. Done after join. */
+ if ((changed & BSS_CHANGED_ARP_FILTER) ||
+ (!is_ibss && (changed & BSS_CHANGED_QOS))) {
+ __be32 addr = bss_conf->arp_addr_list[0];
+ wlvif->sta.qos = bss_conf->qos;
+ WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
+
+ if (bss_conf->arp_addr_cnt == 1 &&
+ bss_conf->arp_filter_enabled) {
+ wlvif->ip_addr = addr;
+ /*
+ * The template should have been configured only upon
+			 * association. However, it seems that the correct IP
+			 * isn't being set (when sending), so we have to
+			 * reconfigure the template upon every IP change.
+ */
+ ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
+ if (ret < 0) {
+ wl1271_warning("build arp rsp failed: %d", ret);
+ goto out;
+ }
+
+ ret = wl1271_acx_arp_ip_filter(wl, wlvif,
+ (ACX_ARP_FILTER_ARP_FILTERING |
+ ACX_ARP_FILTER_AUTO_ARP),
+ addr);
+ } else {
+ wlvif->ip_addr = 0;
+ ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
+ }
+
+ if (ret < 0)
+ goto out;
+ }
+
out:
return;
}
@@ -4009,6 +4159,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
u64 mactime = ULLONG_MAX;
int ret;
@@ -4023,7 +4174,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- ret = wl1271_acx_tsf_info(wl, &mactime);
+ ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
if (ret < 0)
goto out_sleep;
@@ -4085,107 +4236,155 @@ void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
clear_bit(hlid, wlvif->ap.sta_hlid_map);
memset(wl->links[hlid].addr, 0, ETH_ALEN);
wl->links[hlid].ba_bitmap = 0;
- wl1271_tx_reset_link_queues(wl, hlid);
__clear_bit(hlid, &wl->ap_ps_map);
__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
wl12xx_free_link(wl, wlvif, &hlid);
wl->active_sta_count--;
+
+ /*
+ * rearm the tx watchdog when the last STA is freed - give the FW a
+ * chance to return STA-buffered packets before complaining.
+ */
+ if (wl->active_sta_count == 0)
+ wl12xx_rearm_tx_watchdog_locked(wl);
}
-static int wl1271_op_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static int wl12xx_sta_add(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta)
{
- struct wl1271 *wl = hw->priv;
- struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271_station *wl_sta;
int ret = 0;
u8 hlid;
- mutex_lock(&wl->mutex);
-
- if (unlikely(wl->state == WL1271_STATE_OFF))
- goto out;
-
- if (wlvif->bss_type != BSS_TYPE_AP_BSS)
- goto out;
-
wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
ret = wl1271_allocate_sta(wl, wlvif, sta);
if (ret < 0)
- goto out;
+ return ret;
wl_sta = (struct wl1271_station *)sta->drv_priv;
hlid = wl_sta->hlid;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out_free_sta;
-
ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
if (ret < 0)
- goto out_sleep;
+ wl1271_free_sta(wl, wlvif, hlid);
- ret = wl12xx_cmd_set_peer_state(wl, hlid);
- if (ret < 0)
- goto out_sleep;
+ return ret;
+}
- ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true, hlid);
- if (ret < 0)
- goto out_sleep;
+static int wl12xx_sta_remove(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta)
+{
+ struct wl1271_station *wl_sta;
+ int ret = 0, id;
-out_sleep:
- wl1271_ps_elp_sleep(wl);
+ wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
+
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ id = wl_sta->hlid;
+ if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
+ return -EINVAL;
-out_free_sta:
+ ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
if (ret < 0)
- wl1271_free_sta(wl, wlvif, hlid);
+ return ret;
-out:
- mutex_unlock(&wl->mutex);
+ wl1271_free_sta(wl, wlvif, wl_sta->hlid);
return ret;
}
-static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static int wl12xx_update_sta_state(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
{
- struct wl1271 *wl = hw->priv;
- struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271_station *wl_sta;
- int ret = 0, id;
+ u8 hlid;
+ bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
+ bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
+ int ret;
- mutex_lock(&wl->mutex);
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ hlid = wl_sta->hlid;
- if (unlikely(wl->state == WL1271_STATE_OFF))
- goto out;
+ /* Add station (AP mode) */
+ if (is_ap &&
+ old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE)
+ return wl12xx_sta_add(wl, wlvif, sta);
+
+ /* Remove station (AP mode) */
+ if (is_ap &&
+ old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST) {
+ /* must not fail */
+ wl12xx_sta_remove(wl, wlvif, sta);
+ return 0;
+ }
- if (wlvif->bss_type != BSS_TYPE_AP_BSS)
- goto out;
+ /* Authorize station (AP mode) */
+ if (is_ap &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ ret = wl12xx_cmd_set_peer_state(wl, hlid);
+ if (ret < 0)
+ return ret;
- wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
+ ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
+ hlid);
+ return ret;
+ }
- wl_sta = (struct wl1271_station *)sta->drv_priv;
- id = wl_sta->hlid;
- if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
+ /* Authorize station */
+ if (is_sta &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
+ return wl12xx_set_authorized(wl, wlvif);
+ }
+
+ if (is_sta &&
+ old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
+ return 0;
+ }
+
+ return 0;
+}
+
+static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int ret;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
+ sta->aid, old_state, new_state);
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF)) {
+ ret = -EBUSY;
goto out;
+ }
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
- if (ret < 0)
- goto out_sleep;
+ ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
- wl1271_free_sta(wl, wlvif, wl_sta->hlid);
-
-out_sleep:
wl1271_ps_elp_sleep(wl);
-
out:
mutex_unlock(&wl->mutex);
+ if (new_state < old_state)
+ return 0;
return ret;
}
@@ -4354,6 +4553,8 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
+ wl1271_tx_flush(wl);
+
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF)) {
@@ -4370,7 +4571,7 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
/* TODO: change mac80211 to pass vif as param */
wl12xx_for_each_wlvif_sta(wl, wlvif) {
- ret = wl12xx_cmd_channel_switch(wl, ch_switch);
+ ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch);
if (!ret)
set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
@@ -4464,6 +4665,7 @@ static struct ieee80211_channel wl1271_channels[] = {
/* mapping to indexes for wl1271_rates */
static const u8 wl1271_rate_to_idx_2ghz[] = {
/* MCS rates are used only with 11n */
+ 7, /* CONF_HW_RXTX_RATE_MCS7_SGI */
7, /* CONF_HW_RXTX_RATE_MCS7 */
6, /* CONF_HW_RXTX_RATE_MCS6 */
5, /* CONF_HW_RXTX_RATE_MCS5 */
@@ -4585,6 +4787,7 @@ static struct ieee80211_channel wl1271_channels_5ghz[] = {
/* mapping to indexes for wl1271_rates_5ghz */
static const u8 wl1271_rate_to_idx_5ghz[] = {
/* MCS rates are used only with 11n */
+ 7, /* CONF_HW_RXTX_RATE_MCS7_SGI */
7, /* CONF_HW_RXTX_RATE_MCS7 */
6, /* CONF_HW_RXTX_RATE_MCS6 */
5, /* CONF_HW_RXTX_RATE_MCS5 */
@@ -4650,8 +4853,7 @@ static const struct ieee80211_ops wl1271_ops = {
.conf_tx = wl1271_op_conf_tx,
.get_tsf = wl1271_op_get_tsf,
.get_survey = wl1271_op_get_survey,
- .sta_add = wl1271_op_sta_add,
- .sta_remove = wl1271_op_sta_remove,
+ .sta_state = wl12xx_op_sta_state,
.ampdu_action = wl1271_op_ampdu_action,
.tx_frames_pending = wl1271_tx_frames_pending,
.set_bitrate_mask = wl12xx_set_bitrate_mask,
@@ -4825,13 +5027,120 @@ static struct bin_attribute fwlog_attr = {
.read = wl1271_sysfs_read_fwlog,
};
+static bool wl12xx_mac_in_fuse(struct wl1271 *wl)
+{
+ bool supported = false;
+ u8 major, minor;
+
+ if (wl->chip.id == CHIP_ID_1283_PG20) {
+ major = WL128X_PG_GET_MAJOR(wl->hw_pg_ver);
+ minor = WL128X_PG_GET_MINOR(wl->hw_pg_ver);
+
+ /* in wl128x we have the MAC address if the PG is >= (2, 1) */
+ if (major > 2 || (major == 2 && minor >= 1))
+ supported = true;
+ } else {
+ major = WL127X_PG_GET_MAJOR(wl->hw_pg_ver);
+ minor = WL127X_PG_GET_MINOR(wl->hw_pg_ver);
+
+ /* in wl127x we have the MAC address if the PG is >= (3, 1) */
+ if (major == 3 && minor >= 1)
+ supported = true;
+ }
+
+ wl1271_debug(DEBUG_PROBE,
+ "PG Ver major = %d minor = %d, MAC %s present",
+ major, minor, supported ? "is" : "is not");
+
+ return supported;
+}
+
+static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
+ u32 oui, u32 nic, int n)
+{
+ int i;
+
+ wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x, n %d",
+ oui, nic, n);
+
+ if (nic + n - 1 > 0xffffff)
+ wl1271_warning("NIC part of the MAC address wraps around!");
+
+ for (i = 0; i < n; i++) {
+ wl->addresses[i].addr[0] = (u8)(oui >> 16);
+ wl->addresses[i].addr[1] = (u8)(oui >> 8);
+ wl->addresses[i].addr[2] = (u8) oui;
+ wl->addresses[i].addr[3] = (u8)(nic >> 16);
+ wl->addresses[i].addr[4] = (u8)(nic >> 8);
+ wl->addresses[i].addr[5] = (u8) nic;
+ nic++;
+ }
+
+ wl->hw->wiphy->n_addresses = n;
+ wl->hw->wiphy->addresses = wl->addresses;
+}
+
+static void wl12xx_get_fuse_mac(struct wl1271 *wl)
+{
+ u32 mac1, mac2;
+
+ wl1271_set_partition(wl, &wl12xx_part_table[PART_DRPW]);
+
+ mac1 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1);
+ mac2 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2);
+
+ /* these are the two parts of the BD_ADDR */
+ wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
+ ((mac1 & 0xff000000) >> 24);
+ wl->fuse_nic_addr = mac1 & 0xffffff;
+
+ wl1271_set_partition(wl, &wl12xx_part_table[PART_DOWN]);
+}
+
+static int wl12xx_get_hw_info(struct wl1271 *wl)
+{
+ int ret;
+ u32 die_info;
+
+ ret = wl12xx_set_power_on(wl);
+ if (ret < 0)
+ goto out;
+
+ wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
+
+ if (wl->chip.id == CHIP_ID_1283_PG20)
+ die_info = wl1271_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1);
+ else
+ die_info = wl1271_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1);
+
+ wl->hw_pg_ver = (s8) (die_info & PG_VER_MASK) >> PG_VER_OFFSET;
+
+ if (!wl12xx_mac_in_fuse(wl)) {
+ wl->fuse_oui_addr = 0;
+ wl->fuse_nic_addr = 0;
+ } else {
+ wl12xx_get_fuse_mac(wl);
+ }
+
+ wl1271_power_off(wl);
+out:
+ return ret;
+}
+
static int wl1271_register_hw(struct wl1271 *wl)
{
int ret;
+ u32 oui_addr = 0, nic_addr = 0;
if (wl->mac80211_registered)
return 0;
+ ret = wl12xx_get_hw_info(wl);
+ if (ret < 0) {
+ wl1271_error("couldn't get hw info");
+ goto out;
+ }
+
ret = wl1271_fetch_nvs(wl);
if (ret == 0) {
/* NOTE: The wl->nvs->nvs element must be first, in
@@ -4840,39 +5149,42 @@ static int wl1271_register_hw(struct wl1271 *wl)
*/
u8 *nvs_ptr = (u8 *)wl->nvs;
- wl->mac_addr[0] = nvs_ptr[11];
- wl->mac_addr[1] = nvs_ptr[10];
- wl->mac_addr[2] = nvs_ptr[6];
- wl->mac_addr[3] = nvs_ptr[5];
- wl->mac_addr[4] = nvs_ptr[4];
- wl->mac_addr[5] = nvs_ptr[3];
+ oui_addr =
+ (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
+ nic_addr =
+ (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
}
- SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
+	/* if the MAC address is zeroed in the NVS, derive it from the fuse */
+ if (oui_addr == 0 && nic_addr == 0) {
+ oui_addr = wl->fuse_oui_addr;
+ /* fuse has the BD_ADDR, the WLAN addresses are the next two */
+ nic_addr = wl->fuse_nic_addr + 1;
+ }
+
+ wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr, 2);
ret = ieee80211_register_hw(wl->hw);
if (ret < 0) {
wl1271_error("unable to register mac80211 hw: %d", ret);
- return ret;
+ goto out;
}
wl->mac80211_registered = true;
wl1271_debugfs_init(wl);
- register_netdevice_notifier(&wl1271_dev_notifier);
-
wl1271_notice("loaded");
- return 0;
+out:
+ return ret;
}
static void wl1271_unregister_hw(struct wl1271 *wl)
{
- if (wl->state == WL1271_STATE_PLT)
- __wl1271_plt_stop(wl);
+ if (wl->plt)
+ wl1271_plt_stop(wl);
- unregister_netdevice_notifier(&wl1271_dev_notifier);
ieee80211_unregister_hw(wl->hw);
wl->mac80211_registered = false;
@@ -4889,7 +5201,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
};
/* The tx descriptor buffer and the TKIP space. */
- wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE +
+ wl->hw->extra_tx_headroom = WL1271_EXTRA_SPACE_TKIP +
sizeof(struct wl1271_tx_hw_descr);
/* unit us */
@@ -4898,17 +5210,17 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_BEACON_FILTER |
IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
IEEE80211_HW_SUPPORTS_UAPSD |
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_CONNECTION_MONITOR |
- IEEE80211_HW_SUPPORTS_CQM_RSSI |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_AP_LINK_PS |
IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_TX_AMPDU_SETUP_IN_HW;
+ IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
+ IEEE80211_HW_SCAN_WHILE_IDLE;
wl->hw->wiphy->cipher_suites = cipher_suites;
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -4924,10 +5236,10 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
* should be the maximum length possible for a template, without
* the IEEE80211 header of the template
*/
- wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_DFLT_SIZE -
+ wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
sizeof(struct ieee80211_header);
- wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_DFLT_SIZE -
+ wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
sizeof(struct ieee80211_header);
wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
@@ -4993,7 +5305,6 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
wl = hw->priv;
memset(wl, 0, sizeof(*wl));
- INIT_LIST_HEAD(&wl->list);
INIT_LIST_HEAD(&wl->wlvif_list);
wl->hw = hw;
@@ -5010,6 +5321,7 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
+ INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
if (!wl->freezable_wq) {
@@ -5021,7 +5333,6 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
wl->rx_counter = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->band = IEEE80211_BAND_2GHZ;
- wl->vif = NULL;
wl->flags = 0;
wl->sg_enabled = true;
wl->hw_pg_ver = -1;
@@ -5046,6 +5357,7 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
spin_lock_init(&wl->wl_lock);
wl->state = WL1271_STATE_OFF;
+ wl->fw_type = WL12XX_FW_TYPE_NONE;
mutex_init(&wl->mutex);
/* Apply default driver configuration. */
@@ -5113,6 +5425,7 @@ static int wl1271_free_hw(struct wl1271 *wl)
vfree(wl->fw);
wl->fw = NULL;
+ wl->fw_type = WL12XX_FW_TYPE_NONE;
kfree(wl->nvs);
wl->nvs = NULL;
@@ -5299,7 +5612,7 @@ module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
module_param_named(fwlog, fwlog_param, charp, 0);
-MODULE_PARM_DESC(keymap,
+MODULE_PARM_DESC(fwlog,
"FW logger options: continuous, ondemand, dbgpins or disable");
module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
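
The main.c hunks above replace the single NVS-derived MAC address with two addresses built from a 24-bit OUI part and a 24-bit NIC part, falling back to the fuse ROM when the NVS address is zeroed. A minimal userspace sketch of that derivation follows; the register values are invented for illustration, and only the bit-slicing mirrors wl12xx_get_fuse_mac() and wl12xx_derive_mac_addresses().

#include <stdint.h>
#include <stdio.h>

static void derive_addresses(uint32_t oui, uint32_t nic, int n)
{
    for (int i = 0; i < n; i++) {
        uint8_t addr[6] = {
            oui >> 16, oui >> 8, oui,
            nic >> 16, nic >> 8, nic,
        };
        printf("addr %d: %02x:%02x:%02x:%02x:%02x:%02x\n", i,
               addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
        nic++; /* consecutive addresses only bump the NIC part */
    }
}

int main(void)
{
    /* invented fuse register values, for illustration only */
    uint32_t mac1 = 0x12345678, mac2 = 0x00009abc;

    /* same bit-slicing as wl12xx_get_fuse_mac() above */
    uint32_t oui = ((mac2 & 0xffff) << 8) + ((mac1 & 0xff000000) >> 24);
    uint32_t nic = mac1 & 0xffffff;

    /* the fuse holds the BD_ADDR; the WLAN addresses start at NIC + 1 */
    derive_addresses(oui, nic + 1, 2);
    return 0;
}
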
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index a2bdacdd7e1d..78f598b4f97b 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -56,7 +56,7 @@ void wl1271_elp_work(struct work_struct *work)
if (wlvif->bss_type == BSS_TYPE_AP_BSS)
goto out;
- if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
+ if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
goto out;
}
@@ -69,8 +69,6 @@ out:
mutex_unlock(&wl->mutex);
}
-#define ELP_ENTRY_DELAY 5
-
/* Routines to toggle sleep mode while in ELP */
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
@@ -84,13 +82,13 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
if (wlvif->bss_type == BSS_TYPE_AP_BSS)
return;
- if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
+ if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
return;
}
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
- msecs_to_jiffies(ELP_ENTRY_DELAY));
+ msecs_to_jiffies(wl->conf.conn.dynamic_ps_timeout));
}
int wl1271_ps_elp_wakeup(struct wl1271 *wl)
@@ -160,28 +158,39 @@ out:
}
int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- enum wl1271_cmd_ps_mode mode, u32 rates, bool send)
+ enum wl1271_cmd_ps_mode mode)
{
int ret;
+ u16 timeout = wl->conf.conn.dynamic_ps_timeout;
switch (mode) {
+ case STATION_AUTO_PS_MODE:
case STATION_POWER_SAVE_MODE:
- wl1271_debug(DEBUG_PSM, "entering psm");
+ wl1271_debug(DEBUG_PSM, "entering psm (mode=%d,timeout=%u)",
+ mode, timeout);
- ret = wl1271_acx_wake_up_conditions(wl, wlvif);
+ ret = wl1271_acx_wake_up_conditions(wl, wlvif,
+ wl->conf.conn.wake_up_event,
+ wl->conf.conn.listen_interval);
if (ret < 0) {
wl1271_error("couldn't set wake up conditions");
return ret;
}
- ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
+ ret = wl1271_cmd_ps_mode(wl, wlvif, mode, timeout);
if (ret < 0)
return ret;
- set_bit(WLVIF_FLAG_PSM, &wlvif->flags);
+ set_bit(WLVIF_FLAG_IN_PS, &wlvif->flags);
+
+ /* enable beacon early termination. Not relevant for 5GHz */
+ if (wlvif->band == IEEE80211_BAND_2GHZ) {
+ ret = wl1271_acx_bet_enable(wl, wlvif, true);
+ if (ret < 0)
+ return ret;
+ }
break;
case STATION_ACTIVE_MODE:
- default:
wl1271_debug(DEBUG_PSM, "leaving psm");
/* disable beacon early termination */
@@ -191,12 +200,15 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
return ret;
}
- ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_ACTIVE_MODE);
+ ret = wl1271_cmd_ps_mode(wl, wlvif, mode, 0);
if (ret < 0)
return ret;
- clear_bit(WLVIF_FLAG_PSM, &wlvif->flags);
+ clear_bit(WLVIF_FLAG_IN_PS, &wlvif->flags);
break;
+ default:
+ wl1271_warning("trying to set ps to unsupported mode %d", mode);
+ ret = -EINVAL;
}
return ret;
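
The ps.c rework above drops the rates/send arguments from wl1271_ps_set_mode(), takes the firmware timeout from conf.conn.dynamic_ps_timeout, and rejects unknown modes instead of silently treating them as active. A rough userspace sketch of the new dispatch; the firmware command is replaced by a printf, the enum values are local to the sketch, and the 100 ms timeout is only an example.

#include <stdio.h>

enum ps_mode {
    STATION_ACTIVE_MODE,
    STATION_POWER_SAVE_MODE,
    STATION_AUTO_PS_MODE,
};

/* mirrors the shape of the reworked wl1271_ps_set_mode(), not its internals */
static int set_ps_mode(enum ps_mode mode, unsigned int dynamic_ps_timeout)
{
    switch (mode) {
    case STATION_AUTO_PS_MODE:
    case STATION_POWER_SAVE_MODE:
        /* entering PS: hand the configured timeout to the firmware */
        printf("enter psm (mode=%d, timeout=%u)\n", mode, dynamic_ps_timeout);
        return 0;
    case STATION_ACTIVE_MODE:
        /* leaving PS: no timeout needed */
        printf("leave psm\n");
        return 0;
    default:
        printf("trying to set ps to unsupported mode %d\n", mode);
        return -1;
    }
}

int main(void)
{
    set_ps_mode(STATION_AUTO_PS_MODE, 100); /* example timeout in ms */
    set_ps_mode(STATION_ACTIVE_MODE, 0);
    return 0;
}
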
diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/wl12xx/ps.h
index a12052f02026..5f19d4fbbf27 100644
--- a/drivers/net/wireless/wl12xx/ps.h
+++ b/drivers/net/wireless/wl12xx/ps.h
@@ -28,7 +28,7 @@
#include "acx.h"
int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- enum wl1271_cmd_ps_mode mode, u32 rates, bool send);
+ enum wl1271_cmd_ps_mode mode);
void wl1271_ps_elp_sleep(struct wl1271 *wl);
int wl1271_ps_elp_wakeup(struct wl1271 *wl);
void wl1271_elp_work(struct work_struct *work);
diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/wl12xx/reg.h
index df34d5977b98..340db324bc26 100644
--- a/drivers/net/wireless/wl12xx/reg.h
+++ b/drivers/net/wireless/wl12xx/reg.h
@@ -525,4 +525,31 @@ b12-b0 - Supported Rate indicator bits as defined below.
*/
#define INTR_TRIG_TX_PROC1 BIT(18)
+#define WL127X_REG_FUSE_DATA_2_1 0x050a
+#define WL128X_REG_FUSE_DATA_2_1 0x2152
+#define PG_VER_MASK 0x3c
+#define PG_VER_OFFSET 2
+
+#define WL127X_PG_MAJOR_VER_MASK 0x3
+#define WL127X_PG_MAJOR_VER_OFFSET 0x0
+#define WL127X_PG_MINOR_VER_MASK 0xc
+#define WL127X_PG_MINOR_VER_OFFSET 0x2
+
+#define WL128X_PG_MAJOR_VER_MASK 0xc
+#define WL128X_PG_MAJOR_VER_OFFSET 0x2
+#define WL128X_PG_MINOR_VER_MASK 0x3
+#define WL128X_PG_MINOR_VER_OFFSET 0x0
+
+#define WL127X_PG_GET_MAJOR(pg_ver) ((pg_ver & WL127X_PG_MAJOR_VER_MASK) >> \
+ WL127X_PG_MAJOR_VER_OFFSET)
+#define WL127X_PG_GET_MINOR(pg_ver) ((pg_ver & WL127X_PG_MINOR_VER_MASK) >> \
+ WL127X_PG_MINOR_VER_OFFSET)
+#define WL128X_PG_GET_MAJOR(pg_ver) ((pg_ver & WL128X_PG_MAJOR_VER_MASK) >> \
+ WL128X_PG_MAJOR_VER_OFFSET)
+#define WL128X_PG_GET_MINOR(pg_ver) ((pg_ver & WL128X_PG_MINOR_VER_MASK) >> \
+ WL128X_PG_MINOR_VER_OFFSET)
+
+#define WL12XX_REG_FUSE_BD_ADDR_1 0x00310eb4
+#define WL12XX_REG_FUSE_BD_ADDR_2 0x00310eb8
+
#endif
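
The fuse-register definitions added above encode the PG version as a 4-bit field, split into major/minor differently on wl127x and wl128x. A small userspace check of the bit fields; the die_info word is an arbitrary example (0x24 decodes to PG 2.1 on wl128x, the first version whose fuse carries a MAC address).

#include <stdio.h>

#define PG_VER_MASK                  0x3c
#define PG_VER_OFFSET                2

#define WL127X_PG_MAJOR_VER_MASK     0x3
#define WL127X_PG_MAJOR_VER_OFFSET   0x0
#define WL127X_PG_MINOR_VER_MASK     0xc
#define WL127X_PG_MINOR_VER_OFFSET   0x2

#define WL128X_PG_MAJOR_VER_MASK     0xc
#define WL128X_PG_MAJOR_VER_OFFSET   0x2
#define WL128X_PG_MINOR_VER_MASK     0x3
#define WL128X_PG_MINOR_VER_OFFSET   0x0

int main(void)
{
    unsigned int die_info = 0x24; /* example fuse word */
    unsigned int pg_ver = (die_info & PG_VER_MASK) >> PG_VER_OFFSET;

    printf("wl127x decode: PG %u.%u\n",
           (pg_ver & WL127X_PG_MAJOR_VER_MASK) >> WL127X_PG_MAJOR_VER_OFFSET,
           (pg_ver & WL127X_PG_MINOR_VER_MASK) >> WL127X_PG_MINOR_VER_OFFSET);
    printf("wl128x decode: PG %u.%u\n",
           (pg_ver & WL128X_PG_MAJOR_VER_MASK) >> WL128X_PG_MAJOR_VER_OFFSET,
           (pg_ver & WL128X_PG_MINOR_VER_MASK) >> WL128X_PG_MINOR_VER_OFFSET);
    return 0;
}
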
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index 4fbd2a722ffa..cfa6071704c5 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -113,7 +113,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
* In PLT mode we seem to get frames and mac80211 warns about them,
* workaround this by not retrieving them at all.
*/
- if (unlikely(wl->state == WL1271_STATE_PLT))
+ if (unlikely(wl->plt))
return -EINVAL;
/* the data read starts with the descriptor */
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index e24111ececc5..fcba055ef196 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -38,7 +38,6 @@ void wl1271_scan_complete_work(struct work_struct *work)
struct ieee80211_vif *vif;
struct wl12xx_vif *wlvif;
int ret;
- bool is_sta, is_ibss;
dwork = container_of(work, struct delayed_work, work);
wl = container_of(dwork, struct wl1271, scan_complete_work);
@@ -56,6 +55,12 @@ void wl1271_scan_complete_work(struct work_struct *work)
vif = wl->scan_vif;
wlvif = wl12xx_vif_to_data(vif);
+ /*
+	 * Rearm the tx watchdog just before the scan goes idle. This
+	 * prevents just-finished scans from triggering the watchdog.
+ */
+ wl12xx_rearm_tx_watchdog_locked(wl);
+
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan.req = NULL;
@@ -70,15 +75,6 @@ void wl1271_scan_complete_work(struct work_struct *work)
wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq);
}
- /* return to ROC if needed */
- is_sta = (wlvif->bss_type == BSS_TYPE_STA_BSS);
- is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
- if (((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
- (is_ibss && !test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))) &&
- !test_bit(wlvif->dev_role_id, wl->roc_map)) {
- /* restore remain on channel */
- wl12xx_start_dev(wl, wlvif);
- }
wl1271_ps_elp_sleep(wl);
if (wl->scan.failed) {
@@ -182,14 +178,23 @@ static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
goto out;
}
+ if (wl->conf.scan.split_scan_timeout)
+ scan_options |= WL1271_SCAN_OPT_SPLIT_SCAN;
+
if (passive)
scan_options |= WL1271_SCAN_OPT_PASSIVE;
- if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID)) {
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS ||
+ test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ cmd->params.role_id = wlvif->role_id;
+ else
+ cmd->params.role_id = wlvif->dev_role_id;
+
+ if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
ret = -EINVAL;
goto out;
}
- cmd->params.role_id = wlvif->role_id;
+
cmd->params.scan_options = cpu_to_le16(scan_options);
cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
@@ -202,7 +207,7 @@ static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
cmd->params.tx_rate = cpu_to_le32(basic_rate);
cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
- cmd->params.tid_trigger = 0;
+ cmd->params.tid_trigger = CONF_TX_AC_ANY_TID;
cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
if (band == IEEE80211_BAND_2GHZ)
@@ -217,16 +222,17 @@ static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
memcpy(cmd->addr, vif->addr, ETH_ALEN);
- ret = wl1271_cmd_build_probe_req(wl, wlvif, wl->scan.ssid,
- wl->scan.ssid_len, wl->scan.req->ie,
- wl->scan.req->ie_len, band);
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ cmd->params.role_id, band,
+ wl->scan.ssid, wl->scan.ssid_len,
+ wl->scan.req->ie,
+ wl->scan.req->ie_len);
if (ret < 0) {
wl1271_error("PROBE request template failed");
goto out;
}
- /* disable the timeout */
- trigger->timeout = 0;
+ trigger->timeout = cpu_to_le32(wl->conf.scan.split_scan_timeout);
ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
sizeof(*trigger), 0);
if (ret < 0) {
@@ -658,11 +664,13 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
}
if (!force_passive && cfg->active[0]) {
- ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
+ u8 band = IEEE80211_BAND_2GHZ;
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ wlvif->dev_role_id, band,
+ req->ssids[0].ssid,
req->ssids[0].ssid_len,
- ies->ie[IEEE80211_BAND_2GHZ],
- ies->len[IEEE80211_BAND_2GHZ],
- IEEE80211_BAND_2GHZ);
+ ies->ie[band],
+ ies->len[band]);
if (ret < 0) {
wl1271_error("2.4GHz PROBE request template failed");
goto out;
@@ -670,11 +678,13 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
}
if (!force_passive && cfg->active[1]) {
- ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
+ u8 band = IEEE80211_BAND_5GHZ;
+ ret = wl12xx_cmd_build_probe_req(wl, wlvif,
+ wlvif->dev_role_id, band,
+ req->ssids[0].ssid,
req->ssids[0].ssid_len,
- ies->ie[IEEE80211_BAND_5GHZ],
- ies->len[IEEE80211_BAND_5GHZ],
- IEEE80211_BAND_5GHZ);
+ ies->ie[band],
+ ies->len[band]);
if (ret < 0) {
wl1271_error("5GHz PROBE request template failed");
goto out;
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/wl12xx/scan.h
index a7ed43dc08c9..96ff457a3a0b 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -48,7 +48,7 @@ void wl1271_scan_sched_scan_results(struct wl1271 *wl);
#define WL1271_SCAN_CURRENT_TX_PWR 0
#define WL1271_SCAN_OPT_ACTIVE 0
#define WL1271_SCAN_OPT_PASSIVE 1
-#define WL1271_SCAN_OPT_TRIGGERED_SCAN 2
+#define WL1271_SCAN_OPT_SPLIT_SCAN 2
#define WL1271_SCAN_OPT_PRIORITY_HIGH 4
/* scan even if we fail to enter psm */
#define WL1271_SCAN_OPT_FORCE 8
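
Two behavioural changes in the scan path are easy to miss in the hunks above: a non-zero conf.scan.split_scan_timeout now sets the (renamed) WL1271_SCAN_OPT_SPLIT_SCAN flag and is programmed as the trigger timeout, and the scan command uses the connection role when one exists, falling back to the device role otherwise. A compact userspace sketch of those decisions; the interface state, the timeout and the WL12XX_INVALID_ROLE_ID sentinel value are assumptions made for the example.

#include <stdio.h>

#define WL1271_SCAN_OPT_PASSIVE    1
#define WL1271_SCAN_OPT_SPLIT_SCAN 2
#define WL12XX_INVALID_ROLE_ID     0xff /* assumed sentinel value */

int main(void)
{
    /* example interface state, not taken from real hardware */
    int is_ap = 0, sta_associated = 1, passive = 0;
    unsigned int role_id = 1, dev_role_id = 2;
    unsigned int split_scan_timeout = 50000; /* example conf value */
    unsigned int scan_options = 0;

    /* same decisions as the reworked wl1271_scan_send() above */
    if (split_scan_timeout)
        scan_options |= WL1271_SCAN_OPT_SPLIT_SCAN;
    if (passive)
        scan_options |= WL1271_SCAN_OPT_PASSIVE;

    /* scan on the connection role when it exists, else on the device role */
    unsigned int scan_role = (is_ap || sta_associated) ? role_id : dev_role_id;
    if (scan_role == WL12XX_INVALID_ROLE_ID)
        return 1;

    printf("role=%u options=0x%x trigger timeout=%u\n",
           scan_role, scan_options, split_scan_timeout);
    return 0;
}
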
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 468a50553fac..4b3c32774bae 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -74,6 +74,8 @@ static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
struct sdio_func *func = dev_to_sdio_func(glue->dev);
+ sdio_claim_host(func);
+
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
@@ -88,6 +90,8 @@ static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
addr, len);
}
+ sdio_release_host(func);
+
if (ret)
dev_err(child->parent, "sdio read failed (%d)\n", ret);
}
@@ -99,6 +103,8 @@ static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
struct sdio_func *func = dev_to_sdio_func(glue->dev);
+ sdio_claim_host(func);
+
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
@@ -113,6 +119,8 @@ static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
ret = sdio_memcpy_toio(func, addr, buf, len);
}
+ sdio_release_host(func);
+
if (ret)
dev_err(child->parent, "sdio write failed (%d)\n", ret);
}
@@ -136,6 +144,7 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
sdio_claim_host(func);
sdio_enable_func(func);
+ sdio_release_host(func);
out:
return ret;
@@ -146,6 +155,7 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
int ret;
struct sdio_func *func = dev_to_sdio_func(glue->dev);
+ sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
@@ -314,9 +324,6 @@ static int wl1271_suspend(struct device *dev)
dev_err(dev, "error while trying to keep power\n");
goto out;
}
-
- /* release host */
- sdio_release_host(func);
}
out:
return ret;
@@ -324,15 +331,7 @@ out:
static int wl1271_resume(struct device *dev)
{
- struct sdio_func *func = dev_to_sdio_func(dev);
- struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
- struct wl1271 *wl = platform_get_drvdata(glue->core);
-
dev_dbg(dev, "wl1271 resume\n");
- if (wl->wow_enabled) {
- /* claim back host */
- sdio_claim_host(func);
- }
return 0;
}
@@ -371,5 +370,9 @@ module_exit(wl1271_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
-MODULE_FIRMWARE(WL127X_FW_NAME);
-MODULE_FIRMWARE(WL128X_FW_NAME);
+MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
+MODULE_FIRMWARE(WL127X_FW_NAME_MULTI);
+MODULE_FIRMWARE(WL127X_PLT_FW_NAME);
+MODULE_FIRMWARE(WL128X_FW_NAME_SINGLE);
+MODULE_FIRMWARE(WL128X_FW_NAME_MULTI);
+MODULE_FIRMWARE(WL128X_PLT_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 92caa7ce6053..2fc18a8dcce8 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -433,6 +433,10 @@ module_exit(wl1271_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
-MODULE_FIRMWARE(WL127X_FW_NAME);
-MODULE_FIRMWARE(WL128X_FW_NAME);
+MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
+MODULE_FIRMWARE(WL127X_FW_NAME_MULTI);
+MODULE_FIRMWARE(WL127X_PLT_FW_NAME);
+MODULE_FIRMWARE(WL128X_FW_NAME_SINGLE);
+MODULE_FIRMWARE(WL128X_FW_NAME_MULTI);
+MODULE_FIRMWARE(WL128X_PLT_FW_NAME);
MODULE_ALIAS("spi:wl1271");
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index 25093c0cb0ed..1e93bb9c0246 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -30,6 +30,7 @@
#include "acx.h"
#include "reg.h"
#include "ps.h"
+#include "io.h"
#define WL1271_TM_MAX_DATA_LENGTH 1024
@@ -41,6 +42,7 @@ enum wl1271_tm_commands {
WL1271_TM_CMD_NVS_PUSH, /* Not in use. Keep to not break ABI */
WL1271_TM_CMD_SET_PLT_MODE,
WL1271_TM_CMD_RECOVER,
+ WL1271_TM_CMD_GET_MAC,
__WL1271_TM_CMD_AFTER_LAST
};
@@ -264,6 +266,52 @@ static int wl1271_tm_cmd_recover(struct wl1271 *wl, struct nlattr *tb[])
return 0;
}
+static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[])
+{
+ struct sk_buff *skb;
+ u8 mac_addr[ETH_ALEN];
+ int ret = 0;
+
+ mutex_lock(&wl->mutex);
+
+ if (!wl->plt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ mac_addr[0] = (u8)(wl->fuse_oui_addr >> 16);
+ mac_addr[1] = (u8)(wl->fuse_oui_addr >> 8);
+ mac_addr[2] = (u8) wl->fuse_oui_addr;
+ mac_addr[3] = (u8)(wl->fuse_nic_addr >> 16);
+ mac_addr[4] = (u8)(wl->fuse_nic_addr >> 8);
+ mac_addr[5] = (u8) wl->fuse_nic_addr;
+
+ skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, ETH_ALEN);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ NLA_PUT(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr);
+ ret = cfg80211_testmode_reply(skb);
+ if (ret < 0)
+ goto out;
+
+out:
+ mutex_unlock(&wl->mutex);
+ return ret;
+
+nla_put_failure:
+ kfree_skb(skb);
+ ret = -EMSGSIZE;
+ goto out;
+}
+
int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
{
struct wl1271 *wl = hw->priv;
@@ -288,6 +336,8 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
return wl1271_tm_cmd_set_plt_mode(wl, tb);
case WL1271_TM_CMD_RECOVER:
return wl1271_tm_cmd_recover(wl, tb);
+ case WL1271_TM_CMD_GET_MAC:
+ return wl12xx_tm_cmd_get_mac(wl, tb);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index 4508ccd78328..43ae49143d68 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -77,35 +77,6 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
}
}
-static int wl1271_tx_update_filters(struct wl1271 *wl,
- struct wl12xx_vif *wlvif,
- struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr;
- int ret;
-
- hdr = (struct ieee80211_hdr *)skb->data;
-
- /*
- * stop bssid-based filtering before transmitting authentication
- * requests. this way the hw will never drop authentication
- * responses coming from BSSIDs it isn't familiar with (e.g. on
- * roaming)
- */
- if (!ieee80211_is_auth(hdr->frame_control))
- return 0;
-
- if (wlvif->dev_hlid != WL12XX_INVALID_LINK_ID)
- goto out;
-
- wl1271_debug(DEBUG_CMD, "starting device role for roaming");
- ret = wl12xx_start_dev(wl, wlvif);
- if (ret < 0)
- goto out;
-out:
- return 0;
-}
-
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
struct sk_buff *skb)
{
@@ -187,8 +158,6 @@ u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (wlvif->bss_type == BSS_TYPE_AP_BSS)
return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
- wl1271_tx_update_filters(wl, wlvif, skb);
-
if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
!ieee80211_is_auth(hdr->frame_control) &&
@@ -257,6 +226,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl->tx_blocks_available -= total_blocks;
wl->tx_allocated_blocks += total_blocks;
+ /* If the FW was empty before, arm the Tx watchdog */
+ if (wl->tx_allocated_blocks == total_blocks)
+ wl12xx_rearm_tx_watchdog_locked(wl);
+
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
wl->tx_allocated_pkts[ac]++;
@@ -286,16 +259,20 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int aligned_len, ac, rate_idx;
s64 hosttime;
u16 tx_attr = 0;
+ __le16 frame_control;
+ struct ieee80211_hdr *hdr;
+ u8 *frame_start;
bool is_dummy;
desc = (struct wl1271_tx_hw_descr *) skb->data;
+ frame_start = (u8 *)(desc + 1);
+ hdr = (struct ieee80211_hdr *)(frame_start + extra);
+ frame_control = hdr->frame_control;
/* relocate space for security header */
if (extra) {
- void *framestart = skb->data + sizeof(*desc);
- u16 fc = *(u16 *)(framestart + extra);
- int hdrlen = ieee80211_hdrlen(cpu_to_le16(fc));
- memmove(framestart, framestart + extra, hdrlen);
+ int hdrlen = ieee80211_hdrlen(frame_control);
+ memmove(frame_start, hdr, hdrlen);
}
/* configure packet life time */
@@ -384,6 +361,11 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
desc->wl127x_mem.total_mem_blocks);
}
+ /* for WEP shared auth - no fw encryption is needed */
+ if (ieee80211_is_auth(frame_control) &&
+ ieee80211_has_protected(frame_control))
+ tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
+
desc->tx_attr = cpu_to_le16(tx_attr);
}
@@ -408,7 +390,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (info->control.hw_key &&
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
- extra = WL1271_TKIP_IV_SPACE;
+ extra = WL1271_EXTRA_SPACE_TKIP;
if (info->control.hw_key) {
bool is_wep;
@@ -549,6 +531,7 @@ static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
if (skb) {
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
+ WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
wl->tx_queue_count[q]--;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
@@ -593,6 +576,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
struct wl12xx_vif *wlvif = wl->last_wlvif;
struct sk_buff *skb = NULL;
+ /* continue from last wlvif (round robin) */
if (wlvif) {
wl12xx_for_each_wlvif_continue(wl, wlvif) {
skb = wl12xx_vif_skb_dequeue(wl, wlvif);
@@ -603,7 +587,11 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
}
}
- /* do another pass */
+	/* dequeue from the system HLID before restarting the wlvif list */
+ if (!skb)
+ skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
+
+ /* do a new pass over the wlvif list */
if (!skb) {
wl12xx_for_each_wlvif(wl, wlvif) {
skb = wl12xx_vif_skb_dequeue(wl, wlvif);
@@ -611,12 +599,16 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
wl->last_wlvif = wlvif;
break;
}
+
+ /*
+ * No need to continue after last_wlvif. The previous
+ * pass should have found it.
+ */
+ if (wlvif == wl->last_wlvif)
+ break;
}
}
- if (!skb)
- skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
-
if (!skb &&
test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
int q;
@@ -624,6 +616,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
skb = wl->dummy_packet;
q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
+ WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
wl->tx_queue_count[q]--;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
@@ -795,6 +788,18 @@ out:
mutex_unlock(&wl->mutex);
}
+static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
+{
+ u8 flags = 0;
+
+ if (rate_class_index >= CONF_HW_RXTX_RATE_MCS_MIN &&
+ rate_class_index <= CONF_HW_RXTX_RATE_MCS_MAX)
+ flags |= IEEE80211_TX_RC_MCS;
+ if (rate_class_index == CONF_HW_RXTX_RATE_MCS7_SGI)
+ flags |= IEEE80211_TX_RC_SHORT_GI;
+ return flags;
+}
+
static void wl1271_tx_complete_packet(struct wl1271 *wl,
struct wl1271_tx_hw_res_descr *result)
{
@@ -804,6 +809,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
struct sk_buff *skb;
int id = result->id;
int rate = -1;
+ u8 rate_flags = 0;
u8 retries = 0;
/* check for id legality */
@@ -830,6 +836,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
info->flags |= IEEE80211_TX_STAT_ACK;
rate = wl1271_rate_to_idx(result->rate_class_index,
wlvif->band);
+ rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
retries = result->ack_failures;
} else if (result->status == TX_RETRY_EXCEEDED) {
wl->stats.excessive_retries++;
@@ -838,7 +845,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
info->status.rates[0].idx = rate;
info->status.rates[0].count = retries;
- info->status.rates[0].flags = 0;
+ info->status.rates[0].flags = rate_flags;
info->status.ack_signal = -1;
wl->stats.retry_count += result->ack_failures;
@@ -869,8 +876,9 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
if (info->control.hw_key &&
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen);
- skb_pull(skb, WL1271_TKIP_IV_SPACE);
+ memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
+ hdrlen);
+ skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
}
wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
@@ -966,7 +974,6 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
else
wlvif->sta.ba_rx_bitmap = 0;
- wl1271_tx_reset_link_queues(wl, i);
wl->links[i].allocated_pkts = 0;
wl->links[i].prev_freed_pkts = 0;
}
@@ -980,8 +987,14 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
struct sk_buff *skb;
struct ieee80211_tx_info *info;
- for (i = 0; i < NUM_TX_QUEUES; i++)
- wl->tx_queue_count[i] = 0;
+ /* only reset the queues if something bad happened */
+ if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) {
+ for (i = 0; i < WL12XX_MAX_LINKS; i++)
+ wl1271_tx_reset_link_queues(wl, i);
+
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ wl->tx_queue_count[i] = 0;
+ }
wl->stopped_queues_map = 0;
@@ -1012,9 +1025,9 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
info->control.hw_key->cipher ==
WLAN_CIPHER_SUITE_TKIP) {
int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- memmove(skb->data + WL1271_TKIP_IV_SPACE,
+ memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
skb->data, hdrlen);
- skb_pull(skb, WL1271_TKIP_IV_SPACE);
+ skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
}
info->status.rates[0].idx = -1;
@@ -1031,6 +1044,7 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
void wl1271_tx_flush(struct wl1271 *wl)
{
unsigned long timeout;
+ int i;
timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
while (!time_after(jiffies, timeout)) {
@@ -1048,6 +1062,12 @@ void wl1271_tx_flush(struct wl1271 *wl)
}
wl1271_warning("Unable to flush all TX buffers, timed out.");
+
+ /* forcibly flush all Tx buffers on our queues */
+ mutex_lock(&wl->mutex);
+ for (i = 0; i < WL12XX_MAX_LINKS; i++)
+ wl1271_tx_reset_link_queues(wl, i);
+ mutex_unlock(&wl->mutex);
}
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
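
The tx.c changes above rename WL1271_TKIP_IV_SPACE to WL1271_EXTRA_SPACE_TKIP and read the frame control word before relocating the header: the reserved gap initially sits in front of the 802.11 header, and moving the header down leaves the gap between header and payload, where the extra security space belongs (the completion path performs the inverse memmove plus skb_pull). A toy userspace demo of that relocation on a flat buffer; the "HDR"/"payload" strings stand in for the 802.11 header and frame body.

#include <stdio.h>
#include <string.h>

#define EXTRA_SPACE_TKIP 4

int main(void)
{
    /* [gap][header][payload], as the frame arrives with headroom reserved */
    char buf[32] = "....HDRHDRHDRpayload";
    size_t hdrlen = 9; /* pretend 802.11 header length */

    char *frame_start = buf;
    char *hdr = frame_start + EXTRA_SPACE_TKIP;

    /* relocate the header over the gap, as wl1271_tx_fill_hdr() does */
    memmove(frame_start, hdr, hdrlen);

    /* the gap now sits between header and payload */
    printf("%.*s|gap|%s\n", (int)hdrlen, buf, buf + hdrlen + EXTRA_SPACE_TKIP);
    return 0;
}
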
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index 2dbb24e6d541..5cf8c32d40d1 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -39,6 +39,7 @@
#define TX_HW_ATTR_LAST_WORD_PAD (BIT(10) | BIT(11))
#define TX_HW_ATTR_TX_CMPLT_REQ BIT(12)
#define TX_HW_ATTR_TX_DUMMY_REQ BIT(13)
+#define TX_HW_ATTR_HOST_ENCRYPT BIT(14)
#define TX_HW_ATTR_OFST_SAVE_RETRIES 0
#define TX_HW_ATTR_OFST_HEADER_PAD 1
@@ -51,7 +52,9 @@
#define TX_HW_RESULT_QUEUE_LEN_MASK 0xf
#define WL1271_TX_ALIGN_TO 4
-#define WL1271_TKIP_IV_SPACE 4
+#define WL1271_EXTRA_SPACE_TKIP 4
+#define WL1271_EXTRA_SPACE_AES 8
+#define WL1271_EXTRA_SPACE_MAX 8
/* Used for management frames and dummy packets */
#define WL1271_TID_MGMT 7
@@ -224,5 +227,6 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
/* from main.c */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
+void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl);
#endif
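
The wl1271_skb_dequeue() rework in tx.c above changes the service order: continue round-robin from the interface that transmitted last, then drain the system HLID, then make one fresh pass that stops once it reaches last_wlvif again. A small userspace model of that ordering; queues are plain counters, -2 marks the system link and -1 means nothing was queued.

#include <stdio.h>

#define NUM_VIFS 3

static int vif_queue[NUM_VIFS]; /* pending frames per interface */
static int system_queue;        /* frames queued on the system HLID */
static int last_vif = -1;       /* interface we last dequeued from */

static int dequeue_one(void)
{
    /* continue from the interface after last_vif (round robin) */
    if (last_vif >= 0) {
        for (int i = last_vif + 1; i < NUM_VIFS; i++) {
            if (vif_queue[i]) {
                vif_queue[i]--;
                last_vif = i;
                return i;
            }
        }
    }

    /* dequeue from the system HLID before restarting the interface list */
    if (system_queue) {
        system_queue--;
        return -2;
    }

    /* fresh pass; no need to look past last_vif again */
    for (int i = 0; i < NUM_VIFS; i++) {
        if (vif_queue[i]) {
            vif_queue[i]--;
            last_vif = i;
            return i;
        }
        if (i == last_vif)
            break;
    }
    return -1; /* nothing queued */
}

int main(void)
{
    vif_queue[0] = 1;
    vif_queue[2] = 2;
    system_queue = 1;

    for (int n = 0; n < 6; n++)
        printf("dequeued from %d\n", dequeue_one());
    return 0;
}
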
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index b2b09cd02022..749a15a75d38 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -35,8 +35,14 @@
#include "conf.h"
#include "ini.h"
-#define WL127X_FW_NAME "ti-connectivity/wl127x-fw-3.bin"
-#define WL128X_FW_NAME "ti-connectivity/wl128x-fw-3.bin"
+#define WL127X_FW_NAME_MULTI "ti-connectivity/wl127x-fw-4-mr.bin"
+#define WL127X_FW_NAME_SINGLE "ti-connectivity/wl127x-fw-4-sr.bin"
+
+#define WL128X_FW_NAME_MULTI "ti-connectivity/wl128x-fw-4-mr.bin"
+#define WL128X_FW_NAME_SINGLE "ti-connectivity/wl128x-fw-4-sr.bin"
+
+#define WL127X_PLT_FW_NAME "ti-connectivity/wl127x-fw-4-plt.bin"
+#define WL128X_PLT_FW_NAME "ti-connectivity/wl128x-fw-4-plt.bin"
/*
* wl127x and wl128x are using the same NVS file name. However, the
@@ -90,7 +96,13 @@
enum wl1271_state {
WL1271_STATE_OFF,
WL1271_STATE_ON,
- WL1271_STATE_PLT,
+};
+
+enum wl12xx_fw_type {
+ WL12XX_FW_TYPE_NONE,
+ WL12XX_FW_TYPE_NORMAL,
+ WL12XX_FW_TYPE_MULTI,
+ WL12XX_FW_TYPE_PLT,
};
enum wl1271_partition_type {
@@ -247,15 +259,17 @@ enum wl12xx_flags {
WL1271_FLAG_PENDING_WORK,
WL1271_FLAG_SOFT_GEMINI,
WL1271_FLAG_RECOVERY_IN_PROGRESS,
+ WL1271_FLAG_VIF_CHANGE_IN_PROGRESS,
+ WL1271_FLAG_INTENDED_FW_RECOVERY,
};
enum wl12xx_vif_flags {
WLVIF_FLAG_INITIALIZED,
WLVIF_FLAG_STA_ASSOCIATED,
+ WLVIF_FLAG_STA_AUTHORIZED,
WLVIF_FLAG_IBSS_JOINED,
WLVIF_FLAG_AP_STARTED,
- WLVIF_FLAG_PSM,
- WLVIF_FLAG_PSM_REQUESTED,
+ WLVIF_FLAG_IN_PS,
WLVIF_FLAG_STA_STATE_SENT,
WLVIF_FLAG_RX_STREAMING_STARTED,
WLVIF_FLAG_PSPOLL_FAILURE,
@@ -295,6 +309,9 @@ struct wl1271 {
spinlock_t wl_lock;
enum wl1271_state state;
+ enum wl12xx_fw_type fw_type;
+ bool plt;
+ u8 last_vif_count;
struct mutex mutex;
unsigned long flags;
@@ -313,7 +330,12 @@ struct wl1271 {
s8 hw_pg_ver;
- u8 mac_addr[ETH_ALEN];
+ /* address read from the fuse ROM */
+ u32 fuse_oui_addr;
+ u32 fuse_nic_addr;
+
+ /* we have up to 2 MAC addresses */
+ struct mac_address addresses[2];
int channel;
u8 system_hlid;
@@ -425,8 +447,6 @@ struct wl1271 {
struct wl12xx_fw_status *fw_status;
struct wl1271_tx_hw_res_if *tx_res_if;
- struct ieee80211_vif *vif;
-
/* Current chipset configuration */
struct conf_drv_settings conf;
@@ -434,8 +454,6 @@ struct wl1271 {
bool enable_11a;
- struct list_head list;
-
/* Most recently reported noise in dBm */
s8 noise;
@@ -477,6 +495,9 @@ struct wl1271 {
/* last wlvif we transmitted from */
struct wl12xx_vif *last_wlvif;
+
+ /* work to fire when Tx is stuck */
+ struct delayed_work tx_watchdog_work;
};
struct wl1271_station {
@@ -503,6 +524,8 @@ struct wl12xx_vif {
u8 basic_rate_idx;
u8 ap_rate_idx;
u8 p2p_rate_idx;
+
+ bool qos;
} sta;
struct {
u8 global_hlid;
@@ -560,12 +583,6 @@ struct wl12xx_vif {
/* Session counter for the chipset */
int session_counter;
- struct completion *ps_compl;
- struct delayed_work pspoll_work;
-
- /* counter for ps-poll delivery failures */
- int ps_poll_failures;
-
/* retry counter for PSM entries */
u8 psm_entry_retry;
@@ -575,6 +592,10 @@ struct wl12xx_vif {
int rssi_thold;
int last_rssi_event;
+ /* save the current encryption type for auto-arp config */
+ u8 encryption_type;
+ __be32 ip_addr;
+
/* RX BA constraint value */
bool ba_support;
bool ba_allowed;
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index 8f0ffaf62309..22b0bc98d7b5 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -117,7 +117,7 @@ struct wl12xx_ps_poll_template {
} __packed;
struct wl12xx_arp_rsp_template {
- struct ieee80211_hdr_3addr hdr;
+ /* not including ieee80211 header */
u8 llc_hdr[sizeof(rfc1042_header)];
__be16 llc_type;
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 98a574a4a465..9fcde36d5013 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -306,9 +306,19 @@ int zd_op_start(struct ieee80211_hw *hw)
r = set_mc_hash(mac);
if (r)
goto disable_int;
+
+	/* Wait after setting the multicast hash table and before powering
+	 * on the radio, otherwise interface bring-up will fail. This matches
+ * what the vendor driver did.
+ */
+ msleep(10);
+
r = zd_chip_switch_radio_on(chip);
- if (r < 0)
+ if (r < 0) {
+ dev_err(zd_chip_dev(chip),
+ "%s: failed to set radio on\n", __func__);
goto disable_int;
+ }
r = zd_chip_enable_rxtx(chip);
if (r < 0)
goto disable_radio;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 59effac15f36..2596401308a8 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1639,10 +1639,8 @@ static int __init netback_init(void)
xen_netbk_group_nr = num_online_cpus();
xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
- if (!xen_netbk) {
- printk(KERN_ALERT "%s: out of memory\n", __func__);
+ if (!xen_netbk)
return -ENOMEM;
- }
for (group = 0; group < xen_netbk_group_nr; group++) {
struct xen_netbk *netbk = &xen_netbk[group];
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 698b905058dd..b16175032327 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -489,6 +489,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
int frags = skb_shinfo(skb)->nr_frags;
unsigned int offset = offset_in_page(data);
unsigned int len = skb_headlen(skb);
+ unsigned long flags;
frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
@@ -498,12 +499,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
}
- spin_lock_irq(&np->tx_lock);
+ spin_lock_irqsave(&np->tx_lock, flags);
if (unlikely(!netif_carrier_ok(dev) ||
(frags > 1 && !xennet_can_sg(dev)) ||
netif_needs_gso(skb, netif_skb_features(skb)))) {
- spin_unlock_irq(&np->tx_lock);
+ spin_unlock_irqrestore(&np->tx_lock, flags);
goto drop;
}
@@ -574,7 +575,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!netfront_tx_slot_available(np))
netif_stop_queue(dev);
- spin_unlock_irq(&np->tx_lock);
+ spin_unlock_irqrestore(&np->tx_lock, flags);
return NETDEV_TX_OK;
@@ -1228,6 +1229,33 @@ static int xennet_set_features(struct net_device *dev,
return 0;
}
+static irqreturn_t xennet_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct netfront_info *np = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&np->tx_lock, flags);
+
+ if (likely(netif_carrier_ok(dev))) {
+ xennet_tx_buf_gc(dev);
+ /* Under tx_lock: protects access to rx shared-ring indexes. */
+ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
+ napi_schedule(&np->napi);
+ }
+
+ spin_unlock_irqrestore(&np->tx_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xennet_poll_controller(struct net_device *dev)
+{
+ xennet_interrupt(0, dev);
+}
+#endif
+
static const struct net_device_ops xennet_netdev_ops = {
.ndo_open = xennet_open,
.ndo_uninit = xennet_uninit,
@@ -1239,6 +1267,9 @@ static const struct net_device_ops xennet_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_fix_features = xennet_fix_features,
.ndo_set_features = xennet_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xennet_poll_controller,
+#endif
};
static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
@@ -1248,11 +1279,8 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
struct netfront_info *np;
netdev = alloc_etherdev(sizeof(struct netfront_info));
- if (!netdev) {
- printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
- __func__);
+ if (!netdev)
return ERR_PTR(-ENOMEM);
- }
np = netdev_priv(netdev);
np->xbdev = dev;
@@ -1448,26 +1476,6 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
return 0;
}
-static irqreturn_t xennet_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct netfront_info *np = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&np->tx_lock, flags);
-
- if (likely(netif_carrier_ok(dev))) {
- xennet_tx_buf_gc(dev);
- /* Under tx_lock: protects access to rx shared-ring indexes. */
- if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
- napi_schedule(&np->napi);
- }
-
- spin_unlock_irqrestore(&np->tx_lock, flags);
-
- return IRQ_HANDLED;
-}
-
static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
struct xen_netif_tx_sring *txs;